1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // See malloc.h for overview.
7 // TODO(rsc): double-check stats.
16 #include "go-string.h"
17 #include "interface.h"
// Short aliases for the libgo runtime's reflection structures:
// Eface is an empty interface value, Type a type descriptor,
// FuncType a function-type descriptor (used by SetFinalizer below).
19 typedef struct __go_empty_interface Eface;
20 typedef struct __go_type_descriptor Type;
21 typedef struct __go_func_type FuncType;
24 extern MStats mstats; // defined in extern.go
// runtime.MemProfileRate is declared in Go code; the __asm__ attaches
// this C declaration to its mangled libgo symbol name.
26 extern volatile int32 runtime_MemProfileRate
27 __asm__ ("libgo_runtime.runtime.MemProfileRate");
29 // Same algorithm from chan.c, but a different
30 // instance of the static uint32 x.
31 // Not protected by a lock - let the threads use
32 // the same random number if they like.
// PRNG state, presumably consumed by fastrand1() (called in
// runtime_mallocgc's profiling path) — TODO confirm, the fastrand1
// body is not visible in this excerpt.
36 static uint32 x = 0x49f6428aUL;
44 // Allocate an object of at least size bytes.
45 // Small objects are allocated from the per-thread cache's free lists.
46 // Large objects (> 32 kB) are allocated straight from the heap.
// NOTE(review): elided excerpt — the numbers at the left are the original
// file's line numbers; gaps are lines not shown here (return type, nil
// checks, braces, `c = m->mcache`, etc.).
48 runtime_mallocgc(uintptr size, uint32 refflag, int32 dogc, int32 zeroed)
50 int32 sizeclass, rate;
// m->mallocing acts as a recursion guard: if it is already 1 we are
// re-entering malloc/free on this M, which is a fatal runtime bug.
57 if(!__sync_bool_compare_and_swap(&m->mallocing, 0, 1))
58 runtime_throw("malloc/free - deadlock");
// Small-object path: map the request to a size class, round size up to
// the class size, and take a block from the per-M cache free list.
63 if(size <= MaxSmallSize) {
64 // Allocate from mcache free lists.
65 sizeclass = runtime_SizeToClass(size);
66 size = runtime_class_to_size[sizeclass];
68 v = runtime_MCache_Alloc(c, sizeclass, size, zeroed);
70 runtime_throw("out of memory");
72 mstats.total_alloc += size;
73 mstats.by_size[sizeclass].nmalloc++;
// Find the ref-count word for the new block and stamp it with the
// caller-supplied flags.
75 if(!runtime_mlookup(v, nil, nil, nil, &ref)) {
76 // runtime_printf("malloc %D; runtime_mlookup failed\n", (uint64)size);
77 runtime_throw("malloc runtime_mlookup");
79 *ref = RefNone | refflag;
81 // TODO(rsc): Report tracebacks for very large allocations.
// Large-object path: size in whole pages, rounded up when not
// page-aligned (the increment statement itself is elided here).
83 // Allocate directly from heap.
84 npages = size >> PageShift;
85 if((size & PageMask) != 0)
87 s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1);
89 runtime_throw("out of memory");
90 size = npages<<PageShift;
92 mstats.total_alloc += size;
93 v = (void*)(s->start << PageShift);
95 // setup for mark sweep
96 s->gcref0 = RefNone | refflag;
// Release the recursion guard set at the top of the function.
100 __sync_bool_compare_and_swap(&m->mallocing, 1, 0);
// If a GC was requested while we held mallocing, run it now — unless
// the profiler triggered us, in which case just flag it for later.
102 if(__sync_bool_compare_and_swap(&m->gcing, 1, 0)) {
103 if(!(refflag & RefNoProfiling))
104 __go_run_goroutine_gc(0);
106 // We are being called from the profiler. Tell it
107 // to invoke the garbage collector when it is
108 // done. No need to use a sync function here.
109 m->gcing_for_prof = 1;
// Heap-profiling sampling: large allocations are always sampled;
// smaller ones decrement next_sample and are recorded when it
// crosses zero, with the next sample point drawn at random so the
// expected sampling interval is `rate` bytes.
113 if(!(refflag & RefNoProfiling) && (rate = runtime_MemProfileRate) > 0) {
114 if(size >= (uint32) rate)
116 if((uint32) m->mcache->next_sample > size)
117 m->mcache->next_sample -= size;
119 // pick next profile time
120 if(rate > 0x3fffffff) // make 2*rate not overflow
122 m->mcache->next_sample = fastrand1() % (2*rate);
125 runtime_MProf_Malloc(v, size);
// Kick off a collection when the heap has grown past the GC target.
129 if(dogc && mstats.heap_alloc >= mstats.next_gc)
// C-callable allocator entry point: plain zeroed allocation, no
// ref flags, no GC trigger (dogc=0).
135 __go_alloc(uintptr size)
137 return runtime_mallocgc(size, 0, 0, 1);
140 // Free the object whose base pointer is v.
// NOTE(review): elided excerpt — the function signature and several
// guard lines (e.g. the large-object vs. small-object branch structure)
// fall in the gaps between the embedded line numbers.
144 int32 sizeclass, size;
// Same malloc/free recursion guard as runtime_mallocgc.
152 if(!__sync_bool_compare_and_swap(&m->mallocing, 0, 1))
153 runtime_throw("malloc/free - deadlock");
// Resolve v back to its span and ref-count word; freeing a pointer
// that malloc never returned is a fatal error.
155 if(!runtime_mlookup(v, nil, nil, &s, &ref)) {
156 // runtime_printf("free %p: not an allocated block\n", v);
157 runtime_throw("free runtime_mlookup");
159 prof = *ref & RefProfiled;
162 // Find size class for v.
163 sizeclass = s->sizeclass;
// Large object (sizeclass 0 per runtime_mlookup below): account whole
// pages, scrub the memory, and return the span to the heap.
167 runtime_MProf_Free(v, s->npages<<PageShift);
168 mstats.alloc -= s->npages<<PageShift;
169 runtime_memclr(v, s->npages<<PageShift);
170 runtime_MHeap_Free(&runtime_mheap, s, 1);
// Small object: tag the block so a later allocation knows it must be
// zeroed, then hand it back to the per-M cache free list.
174 size = runtime_class_to_size[sizeclass];
175 if(size > (int32)sizeof(uintptr))
176 ((uintptr*)v)[1] = 1; // mark as "needs to be zeroed"
178 runtime_MProf_Free(v, size);
179 mstats.alloc -= size;
180 mstats.by_size[sizeclass].nfree++;
181 runtime_MCache_Free(c, v, sizeclass, size);
// Drop the guard and run any GC that was requested meanwhile.
183 __sync_bool_compare_and_swap(&m->mallocing, 1, 0);
185 if(__sync_bool_compare_and_swap(&m->gcing, 1, 0))
186 __go_run_goroutine_gc(1);
// Resolve an interior pointer v to its allocation: optionally reports
// the block's base address, size, owning span, and ref-count word
// through the out-parameters (each may be nil). Returns 0 (in elided
// lines) when v is not inside any allocated span.
190 runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **sp, uint32 **ref)
// Page-number lookup in the span table.
197 s = runtime_MHeap_LookupMaybe(&runtime_mheap, (uintptr)v>>PageShift);
210 p = (byte*)((uintptr)s->start<<PageShift);
// sizeclass 0 = large object: the whole span is one block.
211 if(s->sizeclass == 0) {
216 *size = s->npages<<PageShift;
222 if((byte*)v >= (byte*)s->gcref) {
223 // pointers into the gc ref counts
224 // do not count as pointers.
// Small object: compute the block index from the class block size n.
228 n = runtime_class_to_size[s->sizeclass];
229 i = ((byte*)v - p)/n;
// Sanity check that the span's gcref array lies inside the span.
235 // good for error checking, but expensive
237 nobj = (s->npages << PageShift) / (n + RefcountOverhead);
238 if((byte*)s->gcref < p || (byte*)(s->gcref+nobj) > p+(s->npages<<PageShift)) {
239 // runtime_printf("odd span state=%d span=%p base=%p sizeclass=%d n=%D size=%D npages=%D\n",
240 // s->state, s, p, s->sizeclass, (uint64)nobj, (uint64)n, (uint64)s->npages);
241 // runtime_printf("s->base sizeclass %d v=%p base=%p gcref=%p blocksize=%D nobj=%D size=%D end=%p end=%p\n",
242 // s->sizeclass, v, p, s->gcref, (uint64)s->npages<<PageShift,
243 // (uint64)nobj, (uint64)n, s->gcref + nobj, p+(s->npages<<PageShift));
244 runtime_throw("bad gcref");
// Allocate a fresh per-M MCache from the heap's FixAlloc pool,
// updating the mcache memory statistics under the heap lock.
254 runtime_allocmcache(void)
258 runtime_lock(&runtime_mheap);
259 c = runtime_FixAlloc_Alloc(&runtime_mheap.cachealloc);
// FixAlloc reuses freed caches via list[0]; only that link needs
// clearing — a recycled cache's other fields were reset on free.
261 // Clear the free list used by FixAlloc; assume the rest is zeroed.
262 c->list[0].list = nil;
264 mstats.mcache_inuse = runtime_mheap.cachealloc.inuse;
265 mstats.mcache_sys = runtime_mheap.cachealloc.sys;
266 runtime_unlock(&runtime_mheap);
// One-time allocator start-up: initialize finalizer table, memory
// profiler, the system memory layer, and the central heap, then give
// the current M its cache.
271 runtime_mallocinit(void)
273 runtime_initfintab();
274 runtime_Mprof_Init();
276 runtime_SysMemInit();
278 runtime_MHeap_Init(&runtime_mheap, runtime_SysAlloc);
279 m->mcache = runtime_allocmcache();
// Smoke test: a 1-byte malloc/free round trip exercises the whole
// small-object path before any real allocation happens.
282 runtime_free(runtime_malloc(1));
// Allocator entry used by generated code: zeroed allocation that is
// allowed to trigger GC (dogc=1), unlike __go_alloc above.
288 runtime_mal(uintptr n)
290 return runtime_mallocgc(n, 0, 1, 1);
// goc-exported runtime.Alloc: thin Go-visible wrapper over
// runtime_malloc.
293 func Alloc(n uintptr) (p *byte) {
294 p = runtime_malloc(n);
// goc-exported runtime.Lookup: report the base and size of the
// allocation containing p (span and ref out-params unused here).
301 func Lookup(p *byte) (base *byte, size uintptr) {
302 runtime_mlookup(p, &base, &size, nil, nil);
// goc-exported runtime.SetFinalizer(obj, finalizer): validates both
// empty-interface arguments, then registers (or, with a nil finalizer
// type, presumably clears — the branch bodies are elided) a finalizer
// for the block obj points to.
// NOTE(review): elided excerpt — the throw calls paired with several
// of the checks below fall in the line-number gaps.
309 func SetFinalizer(obj Eface, finalizer Eface) {
// obj must be a non-nil interface holding a pointer...
314 if(obj.__type_descriptor == nil) {
315 // runtime_printf("runtime.SetFinalizer: first argument is nil interface\n");
317 runtime_throw("runtime.SetFinalizer");
319 if(obj.__type_descriptor->__code != GO_PTR) {
320 // runtime_printf("runtime.SetFinalizer: first argument is %S, not pointer\n", *obj.type->string);
// ...and that pointer must be the base of an allocated block.
323 if(!runtime_mlookup(obj.__object, &base, &size, nil, nil) || obj.__object != base) {
324 // runtime_printf("runtime.SetFinalizer: pointer not at beginning of allocated block\n");
// finalizer, when non-nil, must be func(T) for obj's exact type T:
// not variadic, exactly one parameter, matching type descriptor.
328 if(finalizer.__type_descriptor != nil) {
329 if(finalizer.__type_descriptor->__code != GO_FUNC) {
331 // runtime_printf("runtime.SetFinalizer: second argument is %S, not func(%S)\n", *finalizer.type->string, *obj.type->string);
334 ft = (const FuncType*)finalizer.__type_descriptor;
335 if(ft->__dotdotdot || ft->__in.__count != 1 || !__go_type_descriptors_equal(*(Type**)ft->__in.__values, obj.__type_descriptor))
// Setting a second finalizer on the same object is an error.
338 if(runtime_getfinalizer(obj.__object, 0)) {
339 // runtime_printf("runtime.SetFinalizer: finalizer already set");
// Register: the finalizer func value is unwrapped from its closure
// pointer; nil clears (the nil branch passes nil as the function).
343 runtime_addfinalizer(obj.__object, finalizer.__type_descriptor != nil ? *(void**)finalizer.__object : nil, ft);