// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// See malloc.h for overview.

// TODO(rsc): double-check stats.

#include "interface.h"
// Map gccgo field names to gc field names.
// Eface aka __go_empty_interface.
#define type __type_descriptor
// Type aka __go_type_descriptor
#define kind __code
#define string __reflection
#define KindPtr GO_PTR
#define KindNoPointers GO_NO_POINTERS
// GCCGO SPECIFIC CHANGE
//
// There is a long comment in runtime_mallocinit about where to put the heap
// on a 64-bit system. It makes assumptions that are not valid on linux/arm64
// -- it assumes user space can choose the lower 47 bits of a pointer, but on
// linux/arm64 we can only choose the lower 39 bits. This means the heap is
// roughly a quarter of the available address space and we cannot choose a bit
// pattern that all pointers will have -- luckily the GC is mostly precise
// these days so this doesn't matter all that much. The kernel (as of 3.13)
// will allocate address space starting either down from 0x7fffffffff or up
// from 0x2000000000, so we put the heap roughly in the middle of these two
// addresses to minimize the chance that a non-heap allocation will get in
// the way.
//
// This all means that there isn't much point in trying 256 different
// locations for the heap on such systems.
#ifdef __aarch64__
#define HeapBase(i) ((void*)(uintptr)(0x40ULL<<32))
#define HeapBaseOptions 1
#else
#define HeapBase(i) ((void*)(uintptr)(i<<40|0x00c0ULL<<32))
#define HeapBaseOptions 0x80
#endif
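// Illustration (not from the original source): with HeapBaseOptions = 0x80,
// HeapBase(i) for i in [0, 0x80) yields 0x000000c000000000,
// 0x000001c000000000, ..., 0x00007fc000000000, i.e. the 0x0000XXc000000000
// pattern (XX = 00...7f) discussed in runtime_mallocinit below.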
// END GCCGO SPECIFIC CHANGE

// Mark mheap as 'no pointers'; it does not contain interesting pointers but occupies ~45K.

int32	runtime_checking;

extern MStats mstats;	// defined in zruntime_def_$GOOS_$GOARCH.go

extern volatile intgo runtime_MemProfileRate
  __asm__ (GOSYM_PREFIX "runtime.MemProfileRate");

static MSpan* largealloc(uint32, uintptr*);
static void profilealloc(void *v, uintptr size);
static void settype(MSpan *s, void *v, uintptr typ);
// Allocate an object of at least size bytes.
// Small objects are allocated from the per-thread cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
// If the block will be freed with runtime_free(), typ must be 0.
void*
runtime_mallocgc(uintptr size, uintptr typ, uint32 flag)
{
	uintptr tinysize, size1;

	if(size == 0) {
		// All 0-length allocations use this pointer.
		// The language does not require the allocations to
		// have distinct values.
		return &runtime_zerobase;
	}

	// We should not be called in between __go_set_closure and the
	// actual function call, but cope with it if we are.
	closure = g->closure;

	if(m->mcache == nil && g->ncgo > 0) {
		// For gccgo this case can occur when a cgo or SWIG function
		// has an interface return type and the function
		// returns a non-pointer, so memory allocation occurs
		// after syscall.Cgocall but before syscall.CgocallDone.
		// We treat it as a callback.
		runtime_exitsyscall();

	flag |= FlagNoInvokeGC;
	if(runtime_gcwaiting() && g != m->g0 && m->locks == 0 && !(flag & FlagNoInvokeGC)) {

		runtime_throw("malloc/free - deadlock");
	// Disable preemption during settype.
	// We cannot use m->mallocing for this, because settype calls mallocgc.

	if(DebugTypeAtBlockEnd)
		size += sizeof(uintptr);
	if(!runtime_debug.efence && size <= MaxSmallSize) {
		if((flag&(FlagNoScan|FlagNoGC)) == FlagNoScan && size < TinySize) {

			// Tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be FlagNoScan (have no pointers); this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// The size of the memory block used for combining (TinySize) is tunable.
			// The current setting is 16 bytes, which gives 2x worst-case memory
			// wastage (when all but one of the subobjects are unreachable).
			// 8 bytes would result in no wastage at all, but provides fewer
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst-case wastage.
			// The best-case saving is 8x regardless of block size.
			//
			// Objects obtained from the tiny allocator must not be freed explicitly.
			// So when an object will be freed explicitly, we ensure that
			// its size >= TinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from the tiny allocator; in such a case it allows setting finalizers
			// for an inner byte of a memory block.
			//
			// The main targets of the tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces the number of allocations by ~12% and
			// reduces the heap size by ~20%.
			tinysize = c->tinysize;
			if(size <= tinysize) {
				tiny = c->tiny;
				// Align tiny pointer for required (conservative) alignment.
				if((size&7) == 0)
					tiny = (byte*)ROUND((uintptr)tiny, 8);
				else if((size&3) == 0)
					tiny = (byte*)ROUND((uintptr)tiny, 4);
				else if((size&1) == 0)
					tiny = (byte*)ROUND((uintptr)tiny, 2);
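				// Worked example (illustrative, not from the original
				// source): a 12-byte request is a multiple of 4 but not
				// of 8, so tiny is rounded up to the next 4-byte
				// boundary; if that advances tiny by 2 bytes, size1
				// below becomes 12 + 2 = 14.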
				size1 = size + (tiny - c->tiny);
				if(size1 <= tinysize) {
					// The object fits into the existing tiny block.
					v = (void*)tiny;
					c->tiny = (byte*)v + size;
					c->tinysize -= size1;
					if(incallback)
						runtime_entersyscall();
					g->closure = closure;
					return v;
				}

				// Allocate a new TinySize block.
				s = c->alloc[TinySizeClass];
				if(s->freelist == nil)
					s = runtime_MCache_Refill(c, TinySizeClass);
				v = s->freelist;
				next = v->next;
				s->freelist = next;
				s->ref++;
				if(next != nil)	// prefetching nil leads to a DTLB miss
					PREFETCH(next);
				((uint64*)v)[0] = 0;
				((uint64*)v)[1] = 0;
				// See if we need to replace the existing tiny block with the new one
				// based on the amount of remaining free space.
				if(TinySize-size > tinysize) {
					c->tiny = (byte*)v + size;
					c->tinysize = TinySize - size;
				}
		// Allocate from mcache free lists.
		// Inlined version of SizeToClass().
		if(size <= 1024-8)
			sizeclass = runtime_size_to_class8[(size+7)>>3];
		else
			sizeclass = runtime_size_to_class128[(size-1024+127) >> 7];
		size = runtime_class_to_size[sizeclass];
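		// Example (assuming the standard size-class tables): a 13-byte
		// request indexes runtime_size_to_class8[(13+7)>>3], i.e. entry 2,
		// which names the smallest class of at least 13 bytes, so the
		// allocation is rounded up to 16 bytes.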
		s = c->alloc[sizeclass];
		if(s->freelist == nil)
			s = runtime_MCache_Refill(c, sizeclass);
		v = s->freelist;
		next = v->next;
		s->freelist = next;
		s->ref++;
		if(next != nil)	// prefetching nil leads to a DTLB miss
			PREFETCH(next);
		if(!(flag & FlagNoZero)) {
			v->next = nil;
			// block is zeroed iff second word is zero ...
			if(size > 2*sizeof(uintptr) && ((uintptr*)v)[1] != 0)
				runtime_memclr((byte*)v, size);
		}

		c->local_cachealloc += size;
	} else {
		// Allocate directly from heap.
		s = largealloc(flag, &size);
		v = (void*)(s->start << PageShift);
	}

	if(flag & FlagNoGC)
		runtime_marknogc(v);
	else if(!(flag & FlagNoScan))
		runtime_markscan(v);

	if(DebugTypeAtBlockEnd)
		*(uintptr*)((uintptr)v+size-sizeof(uintptr)) = typ;

	// TODO: save type even if FlagNoScan? Potentially expensive but might help
	// heap profiling/tracing.
	if(UseSpanType && !(flag & FlagNoScan) && typ != 0)
		settype(s, v, typ);

	if(runtime_debug.allocfreetrace)
		runtime_tracealloc(v, size, typ);

	if(!(flag & FlagNoProfiling) && (rate = runtime_MemProfileRate) > 0) {
		if(size < (uintptr)rate && size < (uintptr)(uint32)c->next_sample)
			c->next_sample -= size;
		else
			profilealloc(v, size);
	}

	if(!(flag & FlagNoInvokeGC) && mstats.heap_alloc >= mstats.next_gc)
		runtime_gc(0);
	if(incallback)
		runtime_entersyscall();

	g->closure = closure;

	return v;
}

static MSpan*
largealloc(uint32 flag, uintptr *sizep)
{
	uintptr npages, size;

	// Allocate directly from heap.
	size = *sizep;
	if(size + PageSize < size)
		runtime_throw("out of memory");
	npages = size >> PageShift;
	if((size & PageMask) != 0)
		npages++;
	s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1, !(flag & FlagNoZero));
	if(s == nil)
		runtime_throw("out of memory");
	s->limit = (byte*)(s->start<<PageShift) + size;
	*sizep = npages<<PageShift;
	v = (void*)(s->start << PageShift);
	// setup for mark sweep
	runtime_markspan(v, 0, 0, true);
static void
profilealloc(void *v, uintptr size)
{
	c = runtime_m()->mcache;
	rate = runtime_MemProfileRate;

	// pick next profile time
	// If you change this, also change allocmcache.
	if(rate > 0x3fffffff)	// make 2*rate not overflow
		rate = 0x3fffffff;
	next = runtime_fastrand1() % (2*rate);
	// Subtract the "remainder" of the current allocation.
	// Otherwise objects that are close in size to the sampling rate
	// will be under-sampled, because we consistently discard this remainder.
	next -= (size - c->next_sample);
	if(next < 0)
		next = 0;
	c->next_sample = next;

	runtime_MProf_Malloc(v, size);
}
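// Illustrative arithmetic for the sampling above (not from the original
// source): with rate = 1000, a 900-byte allocation that arrives with
// next_sample = 100 picks next = fastrand % 2000 and then subtracts the
// 800-byte overshoot (clamping at 0), so allocations comparable in size to
// the sampling rate are not systematically under-sampled.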
void*
__go_alloc(uintptr size)
{
	return runtime_mallocgc(size, 0, FlagNoInvokeGC);
}

// Free the object whose base pointer is v.
void
runtime_free(void *v)
{
	// If you change this also change mgc0.c:/^sweep,
	// which has a copy of the guts of free.

		runtime_throw("malloc/free - deadlock");

	if(!runtime_mlookup(v, nil, nil, &s)) {
		runtime_printf("free %p: not an allocated block\n", v);
		runtime_throw("free runtime_mlookup");
	}

	sizeclass = s->sizeclass;
	// Objects that are smaller than TinySize can come from the tiny allocator;
	// if such an object were then combined with an object that has a finalizer,
	// we would crash.
	if(size < TinySize)
		runtime_throw("freeing too small block");

	if(runtime_debug.allocfreetrace)
		runtime_tracefree(v, size);

	// Ensure that the span is swept.
	// If we free into an unswept span, we will corrupt GC bitmaps.
	runtime_MSpan_EnsureSwept(s);

	if(s->specials != nil)
		runtime_freeallspecials(s, v, size);

	if(sizeclass == 0) {
		// Large object.

		// Must mark v freed before calling unmarkspan and MHeap_Free:
		// they might coalesce v into other spans and change the bitmap further.
		runtime_markfreed(v);
		runtime_unmarkspan(v, 1<<PageShift);
		// NOTE(rsc,dvyukov): The original implementation of efence
		// in CL 22060046 used SysFree instead of SysFault, so that
		// the operating system would eventually give the memory
		// back to us again, so that an efence program could run
		// longer without running out of memory. Unfortunately,
		// calling SysFree here without any kind of adjustment of the
		// heap data structures means that when the memory does
		// come back to us, we have the wrong metadata for it, either in
		// the MSpan structures or in the garbage collection bitmap.
		// Using SysFault here means that the program will run out of
		// memory fairly quickly in efence mode, but at least it won't
		// have mysterious crashes due to confused memory reuse.
		// It should be possible to switch back to SysFree if we also
		// implement and then call some kind of MHeap_DeleteSpan.
		if(runtime_debug.efence)
			runtime_SysFault((void*)(s->start<<PageShift), size);
		else
			runtime_MHeap_Free(&runtime_mheap, s, 1);
		c->local_nlargefree++;
		c->local_largefree += size;
	} else {
		// Small object.
		if(size > 2*sizeof(uintptr))
			((uintptr*)v)[1] = (uintptr)0xfeedfeedfeedfeedull;	// mark as "needs to be zeroed"
		else if(size > sizeof(uintptr))
			((uintptr*)v)[1] = 0;
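		// Together with the 0xfeedfeed mark above, this maintains the
		// heuristic checked on the mallocgc fast path (see the FlagNoZero
		// branch there): a reusable small block needs zeroing iff its
		// second word is nonzero.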
		// Must mark v freed before calling MCache_Free:
		// it might coalesce v and other blocks into a bigger span
		// and change the bitmap further.
		c->local_nsmallfree[sizeclass]++;
		c->local_cachealloc -= size;
		if(c->alloc[sizeclass] == s) {
			// We own the span, so we can just add v to the freelist.
			runtime_markfreed(v);
			((MLink*)v)->next = s->freelist;
			s->freelist = v;
			s->ref--;
		} else {
			// Someone else owns this span. Add to the free queue.
			runtime_MCache_Free(c, v, sizeclass, size);
		}
int32
runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp)
{
	m->mcache->local_nlookup++;
	if(sizeof(void*) == 4 && m->mcache->local_nlookup >= (1<<30)) {
		// purge cache stats to prevent overflow
		runtime_lock(&runtime_mheap);
		runtime_purgecachedstats(m->mcache);
		runtime_unlock(&runtime_mheap);
	}

	s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);

		runtime_checkfreed(v, 1);

	p = (byte*)((uintptr)s->start<<PageShift);
	if(s->sizeclass == 0) {

		*size = s->npages<<PageShift;

	i = ((byte*)v - p)/n;
void
runtime_purgecachedstats(MCache *c)
{
	// Protected by either heap or GC lock.

	mstats.heap_alloc += c->local_cachealloc;
	c->local_cachealloc = 0;
	mstats.nlookup += c->local_nlookup;
	c->local_nlookup = 0;
	h->largefree += c->local_largefree;
	c->local_largefree = 0;
	h->nlargefree += c->local_nlargefree;
	c->local_nlargefree = 0;
	for(i=0; i<(int32)nelem(c->local_nsmallfree); i++) {
		h->nsmallfree[i] += c->local_nsmallfree[i];
		c->local_nsmallfree[i] = 0;
	}
extern uintptr runtime_sizeof_C_MStats
  __asm__ (GOSYM_PREFIX "runtime.Sizeof_C_MStats");

// The size of the trailing by_size array differs between Go and C:
// NumSizeClasses was changed, but we cannot change the Go struct because of
// backward compatibility. sizeof_C_MStats is what C thinks about the size of
// the Go struct.

// Initialized in mallocinit because it's defined in go/runtime/mem.go.

#define MaxArena32 (2U<<30)
void
runtime_mallocinit(void)
{
	uintptr arena_size, bitmap_size, spans_size, p_size;

	runtime_sizeof_C_MStats = sizeof(MStats) - (NumSizeClasses - 61) * sizeof(mstats.by_size[0]);

	if(runtime_class_to_size[TinySizeClass] != TinySize)
		runtime_throw("bad TinySizeClass");

	// limit = runtime_memlimit();
	// See https://code.google.com/p/go/issues/detail?id=5049
	// TODO(rsc): Fix after 1.1.
	limit = 0;

	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found. The arena begins with a bitmap large
	// enough to hold 4 bits per allocated word.
	if(sizeof(void*) == 8 && (limit == 0 || limit > (1<<30))) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 128 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
		// Allocating a 128 GB region takes away 37 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 11 bits
		// in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
		// that the valid memory addresses will begin with 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out-of-memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer, which reserves all memory up to 0x0100.
		// These choices are both for debuggability and to reduce the
		// odds of the conservative garbage collector not collecting memory
		// because some non-pointer block of memory had a bit pattern
		// that matched a memory address.
		//
		// Actually we reserve 136 GB (because the bitmap ends up being 8 GB)
		// but it hardly matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails, we fall back to the 32-bit memory mechanism.
		arena_size = MaxMem;
		bitmap_size = arena_size / (sizeof(void*)*8/4);
		spans_size = arena_size / PageSize * sizeof(runtime_mheap.spans[0]);
		spans_size = ROUND(spans_size, PageSize);
		for(i = 0; i < HeapBaseOptions; i++) {
			p = HeapBase(i);
			p_size = bitmap_size + spans_size + arena_size + PageSize;
			p = runtime_SysReserve(p, p_size, &reserved);
			if(p != nil)
				break;
		}
	}
	if(p == nil) {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle another 2 GB of mappings (256 MB),
		// along with a reservation for another 512 MB of memory.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere and hope that it's in the 2 GB
		// following the bitmap (presumably the executable begins
		// near the bottom of memory, so we'll have to use up
		// most of memory before the kernel resorts to giving out
		// memory before the beginning of the text segment).
		//
		// Alternatively we could reserve a 512 MB bitmap, enough
		// for 4 GB of mappings, and then accept any memory the
		// kernel threw at us, but normally that's a waste of 512 MB
		// of address space, which is probably too much in a 32-bit world.
		bitmap_size = MaxArena32 / (sizeof(void*)*8/4);
		arena_size = 512<<20;
		spans_size = MaxArena32 / PageSize * sizeof(runtime_mheap.spans[0]);
		if(limit > 0 && arena_size+bitmap_size+spans_size > limit) {
			bitmap_size = (limit / 9) & ~((1<<PageShift) - 1);
			arena_size = bitmap_size * 8;
			spans_size = arena_size / PageSize * sizeof(runtime_mheap.spans[0]);
		}
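		// Sizing sketch (illustrative restatement of the code above):
		// bitmap_size is ~limit/9 and arena_size is 8x that, so the
		// bitmap and arena together stay within the limit, with
		// spans_size a comparatively small addition recomputed on top.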
		spans_size = ROUND(spans_size, PageSize);

		// SysReserve treats the address we ask for, end, as a hint,
		// not as an absolute requirement. If we ask for the end
		// of the data segment but the operating system requires
		// a little more space before we can start allocating, it will
		// give out a slightly higher pointer. Except QEMU, which
		// is buggy, as usual: it won't adjust the pointer upward.
		// So adjust it upward a little bit ourselves: 1/4 MB to get
		// away from the running binary image and then round up
		// to a MB boundary.
		p = (byte*)ROUND((uintptr)_end + (1<<18), 1<<20);
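		// That is: step 256 kB (1<<18) past the binary's _end symbol,
		// then round up to the next 1 MB (1<<20) boundary.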
		p_size = bitmap_size + spans_size + arena_size + PageSize;
		p = runtime_SysReserve(p, p_size, &reserved);
		if(p == nil)
			runtime_throw("runtime: cannot reserve arena virtual address space");
	}

	// PageSize can be larger than the OS's definition of page size,
	// so SysReserve can give us a PageSize-unaligned pointer.
	// To overcome this we ask for PageSize more and round up the pointer.
	p1 = (byte*)ROUND((uintptr)p, PageSize);

	runtime_mheap.spans = (MSpan**)p1;
	runtime_mheap.bitmap = p1 + spans_size;
	runtime_mheap.arena_start = p1 + spans_size + bitmap_size;
	runtime_mheap.arena_used = runtime_mheap.arena_start;
	runtime_mheap.arena_end = p + p_size;
	runtime_mheap.arena_reserved = reserved;

	if(((uintptr)runtime_mheap.arena_start & (PageSize-1)) != 0)
		runtime_throw("misrounded allocation in mallocinit");

	// Initialize the rest of the allocator.
	runtime_MHeap_Init(&runtime_mheap);
	runtime_m()->mcache = runtime_allocmcache();

	// See if it works.
	runtime_free(runtime_malloc(TinySize));
}
void*
runtime_MHeap_SysAlloc(MHeap *h, uintptr n)
{
	if(n > (uintptr)(h->arena_end - h->arena_used)) {
		// We are in 32-bit mode; maybe we didn't use all of the
		// possible address space yet. Reserve some more.

		p_size = ROUND(n + PageSize, 256<<20);
		new_end = h->arena_end + p_size;
		if(new_end <= h->arena_start + MaxArena32) {
			// TODO: It would be bad if part of the arena
			// is reserved and part is not.
			p = runtime_SysReserve(h->arena_end, p_size, &reserved);
			if(p == h->arena_end) {
				h->arena_end = new_end;
				h->arena_reserved = reserved;
			}
			else if(p+p_size <= h->arena_start + MaxArena32) {
				// Keep everything page-aligned.
				// Our pages are bigger than hardware pages.
				h->arena_end = p+p_size;
				h->arena_used = p + (-(uintptr)p&(PageSize-1));
				h->arena_reserved = reserved;
			}
			else {
				runtime_SysFree(p, p_size, &stat);
			}
		}
	}

	if(n <= (uintptr)(h->arena_end - h->arena_used)) {
		// Keep taking from our reservation.
		p = h->arena_used;
		runtime_SysMap(p, n, h->arena_reserved, &mstats.heap_sys);
		h->arena_used += n;
		runtime_MHeap_MapBits(h);
		runtime_MHeap_MapSpans(h);

		if(((uintptr)p & (PageSize-1)) != 0)
			runtime_throw("misrounded allocation in MHeap_SysAlloc");
		return p;
	}

	// If using 64-bit, our reservation is all we have.
	if((uintptr)(h->arena_end - h->arena_start) >= MaxArena32)
		return nil;

	// On 32-bit, once the reservation is gone we can
	// try to get memory at a location chosen by the OS
	// and hope that it is in the range we allocated the bitmap for.
	p_size = ROUND(n, PageSize) + PageSize;
	p = runtime_SysAlloc(p_size, &mstats.heap_sys);
	if(p == nil)
		return nil;

	if(p < h->arena_start || (uintptr)(p+p_size - h->arena_start) >= MaxArena32) {
		runtime_printf("runtime: memory allocated by OS (%p) not in usable range [%p,%p)\n",
			p, h->arena_start, h->arena_start+MaxArena32);
		runtime_SysFree(p, p_size, &mstats.heap_sys);
		return nil;
	}

	p_end = p + p_size;
	p += -(uintptr)p & (PageSize-1);
	if(p+n > h->arena_used) {
		h->arena_used = p+n;
		if(p_end > h->arena_end)
			h->arena_end = p_end;
		runtime_MHeap_MapBits(h);
		runtime_MHeap_MapSpans(h);
	}

	if(((uintptr)p & (PageSize-1)) != 0)
		runtime_throw("misrounded allocation in MHeap_SysAlloc");
	return p;
}
enum
{
	PersistentAllocChunk	= 256<<10,
	PersistentAllocMaxBlock	= 64<<10,	// VM reservation granularity is 64K on Windows
};

// Wrapper around SysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
void*
runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat)
{
	if(align != 0) {
		if(align&(align-1))
			runtime_throw("persistentalloc: align is not a power of 2");
		if(align > PageSize)
			runtime_throw("persistentalloc: align is too large");
	} else
		align = 8;

	if(size >= PersistentAllocMaxBlock)
		return runtime_SysAlloc(size, stat);
	runtime_lock(&persistent);
	persistent.pos = (byte*)ROUND((uintptr)persistent.pos, align);
	if(persistent.pos + size > persistent.end) {
		persistent.pos = runtime_SysAlloc(PersistentAllocChunk, &mstats.other_sys);
		if(persistent.pos == nil) {
			runtime_unlock(&persistent);
			runtime_throw("runtime: cannot allocate memory");
		}
		persistent.end = persistent.pos + PersistentAllocChunk;
	}
	p = persistent.pos;
	persistent.pos += size;
	runtime_unlock(&persistent);
	if(stat != &mstats.other_sys) {
		// reaccount the allocation against the provided stat
		runtime_xadd64(stat, size);
		runtime_xadd64(&mstats.other_sys, -(uint64)size);
	}
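	// Illustrative use (not from the original source):
	// runtime_persistentalloc(n, 0, &mstats.other_sys) returns n bytes at
	// the default 8-byte alignment that can never be freed; callers passing
	// a different stat get the bytes re-accounted as above.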
static void
settype(MSpan *s, void *v, uintptr typ)
{
	uintptr size, ofs, j, t;
	uintptr ntypes, nbytes2, nbytes3;
	byte *data2, *data3;

	if(s->sizeclass == 0) {
		s->types.compression = MTypes_Single;
		s->types.data = typ;
		return;
	}
	size = s->elemsize;
	ofs = ((uintptr)v - (s->start<<PageShift)) / size;

	switch(s->types.compression) {
	case MTypes_Empty:
		ntypes = (s->npages << PageShift) / size;
		nbytes3 = 8*sizeof(uintptr) + 1*ntypes;
		data3 = runtime_mallocgc(nbytes3, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
		s->types.compression = MTypes_Bytes;
		s->types.data = (uintptr)data3;
		((uintptr*)data3)[1] = typ;
		data3[8*sizeof(uintptr) + ofs] = 1;
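		// Layout sketch of the MTypes_Bytes data (for orientation, not
		// from the original source): the first 8 uintptr slots hold up
		// to 8 distinct type words, with slot 0 reserved to mean "no
		// type"; the trailing byte array records, for each object slot
		// in the span, the index of its type word. The new type above
		// occupies slot 1.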
		break;

	case MTypes_Words:
		((uintptr*)s->types.data)[ofs] = typ;
		break;

	case MTypes_Bytes:
		data3 = (byte*)s->types.data;
		for(j=1; j<8; j++) {
			if(((uintptr*)data3)[j] == typ) {
				break;
			}
			if(((uintptr*)data3)[j] == 0) {
				((uintptr*)data3)[j] = typ;
				break;
			}
		}
		if(j < 8) {
			data3[8*sizeof(uintptr) + ofs] = j;
		} else {
			ntypes = (s->npages << PageShift) / size;
			nbytes2 = ntypes * sizeof(uintptr);
			data2 = runtime_mallocgc(nbytes2, 0, FlagNoProfiling|FlagNoScan|FlagNoInvokeGC);
			s->types.compression = MTypes_Words;
			s->types.data = (uintptr)data2;

			// Move the contents of data3 to data2. Then deallocate data3.
			for(j=0; j<ntypes; j++) {
				t = data3[8*sizeof(uintptr) + j];
				t = ((uintptr*)data3)[t];
				data2[j] = t;
			}
uintptr
runtime_gettype(void *v)
{
	MSpan *s;
	uintptr t, ofs;
	byte *data;

	s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);
	t = 0;
	if(s != nil) {
		switch(s->types.compression) {
		case MTypes_Words:
			ofs = (uintptr)v - (s->start<<PageShift);
			t = ((uintptr*)s->types.data)[ofs/s->elemsize];
			break;
		case MTypes_Bytes:
			ofs = (uintptr)v - (s->start<<PageShift);
			data = (byte*)s->types.data;
			t = data[8*sizeof(uintptr) + ofs/s->elemsize];
			t = ((uintptr*)data)[t];
			break;
		default:
			runtime_throw("runtime_gettype: invalid compression kind");
		}
		if(0) {
			runtime_printf("%p -> %d,%X\n", v, (int32)s->types.compression, (int64)t);
		}
	}
	return t;
}
void*
runtime_mal(uintptr n)
{
	return runtime_mallocgc(n, 0, 0);
}

func new(typ *Type) (ret *uint8) {
	ret = runtime_mallocgc(typ->__size, (uintptr)typ | TypeInfo_SingleObject, typ->kind&KindNoPointers ? FlagNoScan : 0);
}

static void*
cnew(const Type *typ, intgo n, int32 objtyp)
{
	if((objtyp&(PtrSize-1)) != objtyp)
		runtime_throw("runtime: invalid objtyp");
	if(n < 0 || (typ->__size > 0 && (uintptr)n > (MaxMem/typ->__size)))
		runtime_panicstring("runtime: allocation size out of range");
	return runtime_mallocgc(typ->__size*n, (uintptr)typ | objtyp, typ->kind&KindNoPointers ? FlagNoScan : 0);
}

// Same as runtime_new, but callable from C.
void*
runtime_cnew(const Type *typ)
{
	return cnew(typ, 1, TypeInfo_SingleObject);
}

void*
runtime_cnewarray(const Type *typ, intgo n)
{
	return cnew(typ, n, TypeInfo_Array);
}

func GC() {
	runtime_gc(2);	// force GC and do eager sweep
}
func SetFinalizer(obj Eface, finalizer Eface) {

	if(obj.__type_descriptor == nil) {
		runtime_printf("runtime.SetFinalizer: first argument is nil interface\n");
		goto throw;
	}
	if(obj.__type_descriptor->__code != GO_PTR) {
		runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.__type_descriptor->__reflection);
		goto throw;
	}
	ot = (const PtrType*)obj.type;
	// As an implementation detail we do not run finalizers for zero-sized objects,
	// because we use &runtime_zerobase for all such allocations.
	if(ot->__element_type != nil && ot->__element_type->__size == 0)
		return;
	// The following check is required for cases when a user passes a pointer to a
	// composite literal, but the compiler makes it point to a global. For example:
	//	var Foo = &Object{}
	//	func main() {
	//		runtime.SetFinalizer(Foo, nil)
	//	}
	if((byte*)obj.__object < runtime_mheap.arena_start || runtime_mheap.arena_used <= (byte*)obj.__object)
		return;
	if(!runtime_mlookup(obj.__object, &base, &size, nil) || obj.__object != base) {
		// As an implementation detail we allow setting finalizers for an inner byte
		// of an object if it could come from the tiny allocator (see mallocgc for details).
		if(ot->__element_type == nil || (ot->__element_type->__code&KindNoPointers) == 0 || ot->__element_type->__size >= TinySize) {
			runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block (%p)\n", obj.__object);
			goto throw;
		}
	}
	if(finalizer.__type_descriptor != nil) {
		runtime_createfing();
		if(finalizer.__type_descriptor->__code != GO_FUNC)
			goto badfunc;
		ft = (const FuncType*)finalizer.__type_descriptor;
		if(ft->__dotdotdot || ft->__in.__count != 1)
			goto badfunc;
		fint = *(Type**)ft->__in.__values;
		if(__go_type_descriptors_equal(fint, obj.__type_descriptor)) {
			// ok - same type
		} else if(fint->__code == GO_PTR && (fint->__uncommon == nil || fint->__uncommon->__name == nil || obj.type->__uncommon == nil || obj.type->__uncommon->__name == nil) && __go_type_descriptors_equal(((const PtrType*)fint)->__element_type, ((const PtrType*)obj.type)->__element_type)) {
			// ok - not same type, but both pointers,
			// one or the other is unnamed, and same element type, so assignable.
		} else if(fint->kind == GO_INTERFACE && ((const InterfaceType*)fint)->__methods.__count == 0) {
			// ok - satisfies empty interface
		} else if(fint->kind == GO_INTERFACE && __go_convert_interface_2(fint, obj.__type_descriptor, 1) != nil) {
			// ok - satisfies non-empty interface
		} else
			goto badfunc;

		ot = (const PtrType*)obj.__type_descriptor;
		if(!runtime_addfinalizer(obj.__object, *(FuncVal**)finalizer.__object, ft, ot)) {
			runtime_printf("runtime.SetFinalizer: finalizer already set\n");
			goto throw;
		}
	} else {
		// NOTE: asking to remove a finalizer when there currently isn't one set is OK.
		runtime_removefinalizer(obj.__object);
	}
	return;

badfunc:
	runtime_printf("runtime.SetFinalizer: cannot pass %S to finalizer %S\n", *obj.__type_descriptor->__reflection, *finalizer.__type_descriptor->__reflection);
throw:
	runtime_throw("runtime.SetFinalizer");
}