// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator, based on tcmalloc.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 100 size classes, each of which
// has its own free list of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using free list
// allocators.

// The allocator's data structures are:
//
//	FixAlloc: a free-list allocator for fixed-size objects,
//		used to manage storage used by the allocator.
//	MHeap: the malloc heap, managed at page (4096-byte) granularity.
//	MSpan: a run of pages managed by the MHeap.
//	MCentral: a shared free list for a given size class.
//	MCache: a per-thread (in Go, per-M) cache for small objects.
//	MStats: allocation statistics.

// Allocating a small object proceeds up a hierarchy of caches:
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding MCache free list.
//	   If the list is not empty, allocate an object from it.
//	   This can all be done without acquiring a lock.
//
//	2. If the MCache free list is empty, replenish it by
//	   taking a bunch of objects from the MCentral free list.
//	   Moving a bunch amortizes the cost of acquiring the MCentral lock.
//
//	3. If the MCentral free list is empty, replenish it by
//	   allocating a run of pages from the MHeap and then
//	   chopping that memory into objects of the given size.
//	   Allocating many objects amortizes the cost of locking
//	   the heap.
//
//	4. If the MHeap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system.  Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
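
// As an editor's illustrative sketch (not literal code from this runtime),
// the fast path of steps 1-2 looks roughly like the following, using the
// MCache and MLink types declared below; runtime_m() is assumed to return
// the current M:
//
//	MCache *c = runtime_m()->mcache;        // per-M cache: no lock needed
//	MLink *v = c->list[sizeclass].list;     // step 1: MCache free list
//	if(v == nil) {
//		runtime_MCache_Refill(c, sizeclass);  // step 2: refill from MCentral
//		v = c->list[sizeclass].list;
//	}
//	c->list[sizeclass].list = v->next;      // pop one object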

// Freeing a small object proceeds up the same hierarchy:
//
//	1. Look up the size class for the object and add it to
//	   the MCache free list.
//
//	2. If the MCache free list is too long or the MCache has
//	   too much memory, return some to the MCentral free lists.
//
//	3. If all the objects in a given span have returned to
//	   the MCentral list, return that span to the page heap.
//
//	4. If the heap has too much memory, return some to the
//	   operating system.
//
// TODO(rsc): Step 4 is not implemented.
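
// Editor's sketch of the free fast path (step 1), assuming v is a small
// object and c is the current M's MCache; illustrative, not literal
// runtime code:
//
//	s = runtime_MHeap_LookupMaybe(&runtime_mheap, v);      // span holding v
//	runtime_MCache_Free(c, v, s->sizeclass, s->elemsize);  // back on MCache list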

// Allocating and freeing a large object uses the page heap
// directly, bypassing the MCache and MCentral free lists.
//
// The small objects on the MCache and MCentral free lists
// may or may not be zeroed.  They are zeroed if and only if
// the second word of the object is zero.  A span in the
// page heap is zeroed unless s->needzero is set.  When a span
// is allocated to break into small objects, it is zeroed if needed
// and s->needzero is set.  There are two main benefits to delaying the
// zeroing this way:
//
//	1. stack frames allocated from the small object lists
//	   or the page heap can avoid zeroing altogether.
//	2. the cost of zeroing when reusing a small object is
//	   charged to the mutator, not the garbage collector.
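
// Editor's sketch of the zero check implied above, for an object v of the
// given size just popped from a free list (illustrative only; runtime_memclr
// is the runtime's memory-clearing helper):
//
//	v->next = nil;                    // first word held the free-list link
//	if(size > 2*sizeof(uintptr) && ((uintptr*)v)[1] != 0)
//		runtime_memclr((byte*)v, size);  // second word nonzero: must zero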

// This C code was written with an eye toward translating to Go
// in the future.  Methods have the form Type_Method(Type *t, ...).

typedef struct MCentral	MCentral;
typedef struct MHeap	MHeap;
typedef struct MSpan	MSpan;
typedef struct MStats	MStats;
typedef struct MLink	MLink;
typedef struct MTypes	MTypes;
typedef struct GCStats	GCStats;

enum
{
	PageShift	= 12,
	PageSize	= 1<<PageShift,
	PageMask	= PageSize - 1,
};
typedef	uintptr	PageID;		// address >> PageShift

enum
{
	// Computed constant.  The definition of MaxSmallSize and the
	// algorithm in msize.c produce some number of different allocation
	// size classes.  NumSizeClasses is that number.  It's needed here
	// because there are static arrays of this length; when msize runs its
	// size choosing algorithm it double-checks that NumSizeClasses agrees.
	NumSizeClasses = 67,

	// Tunable constants.
	MaxSmallSize = 32<<10,

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.goc.
	TinySize = 16,
	TinySizeClass = 2,

	FixAllocChunk = 16<<10,			// Chunk size for FixAlloc
	MaxMHeapList = 1<<(20 - PageShift),	// Maximum page length for fixed-size list in MHeap.
	HeapAllocChunk = 1<<20,			// Chunk size for heap growth

	// Number of bits in page to span calculations (4k pages).
	// On Windows 64-bit we limit the arena to 32GB or 35 bits (see below for reason).
	// On other 64-bit platforms, we limit the arena to 128GB, or 37 bits.
	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
#if __SIZEOF_POINTER__ == 8
#ifdef GOOS_windows
	// Windows counts memory used by page table into committed memory
	// of the process, so we can't reserve too much memory.
	// See http://golang.org/issue/5402 and http://golang.org/issue/5236.
	MHeapMap_Bits = 35 - PageShift,
#else
	MHeapMap_Bits = 37 - PageShift,
#endif
#else
	MHeapMap_Bits = 32 - PageShift,
#endif

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine.  The garbage
	// collector scales well to 8 cpus.
	MaxGcproc = 8,
};

// Maximum memory allocation size, a hint for callers.
// This must be a #define instead of an enum because it
// is so large.
#if __SIZEOF_POINTER__ == 8
#define	MaxMem	(1ULL<<(MHeapMap_Bits+PageShift))	/* 128 GB or 32 GB */
#else
#define	MaxMem	((uintptr)-1)
#endif

// A generic linked list of blocks.  (Typically the block is bigger than sizeof(MLink).)
struct MLink
{
	MLink *next;
};

// SysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes.
// SysUsed notifies the operating system that the contents
// of the memory region are needed again.
//
// SysFree returns memory to the operating system unconditionally;
// this is only used if an out-of-memory error has been detected
// midway through an allocation.  It is okay if SysFree is a no-op.
//
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
// location if that one is unavailable.
//
// SysMap maps previously reserved address space for use.

void*	runtime_SysAlloc(uintptr nbytes, uint64 *stat);
void	runtime_SysFree(void *v, uintptr nbytes, uint64 *stat);
void	runtime_SysUnused(void *v, uintptr nbytes);
void	runtime_SysUsed(void *v, uintptr nbytes);
void	runtime_SysMap(void *v, uintptr nbytes, uint64 *stat);
void*	runtime_SysReserve(void *v, uintptr nbytes);

// FixAlloc is a simple free-list allocator for fixed size objects.
// Malloc uses a FixAlloc wrapped around SysAlloc to manage its
// MCache and MSpan objects.
//
// Memory returned by FixAlloc_Alloc is not zeroed.
// The caller is responsible for locking around FixAlloc calls.
// Callers can keep state in the object but the first word is
// smashed by freeing and reallocating.
struct FixAlloc
{
	uintptr	size;
	void	(*first)(void *arg, byte *p);	// called first time p is returned
	void*	arg;
	MLink*	list;
	byte*	chunk;
	uint32	nchunk;
	uintptr	inuse;	// in-use bytes now
	uint64*	stat;
};

void	runtime_FixAlloc_Init(FixAlloc *f, uintptr size, void (*first)(void*, byte*), void *arg, uint64 *stat);
void*	runtime_FixAlloc_Alloc(FixAlloc *f);
void	runtime_FixAlloc_Free(FixAlloc *f, void *p);
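
// Editor's usage sketch, mirroring how the heap's span allocator is used
// (illustrative, not literal runtime code):
//
//	FixAlloc fa;
//	runtime_FixAlloc_Init(&fa, sizeof(MSpan), nil, nil, &mstats.mspan_sys);
//	MSpan *s = runtime_FixAlloc_Alloc(&fa);  // returns non-zeroed memory
//	runtime_MSpan_Init(s, start, npages);
//	...
//	runtime_FixAlloc_Free(&fa, s);           // smashes the first word of *s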

// Statistics.
// Shared with Go: if you edit this structure, also edit type MemStats in mem.go.
struct MStats
{
	// General statistics.
	uint64	alloc;		// bytes allocated and still in use
	uint64	total_alloc;	// bytes allocated (even if freed)
	uint64	sys;		// bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
	uint64	nlookup;	// number of pointer lookups
	uint64	nmalloc;	// number of mallocs
	uint64	nfree;		// number of frees

	// Statistics about malloc heap.
	// protected by mheap.Lock
	uint64	heap_alloc;	// bytes allocated and still in use
	uint64	heap_sys;	// bytes obtained from system
	uint64	heap_idle;	// bytes in idle spans
	uint64	heap_inuse;	// bytes in non-idle spans
	uint64	heap_released;	// bytes released to the OS
	uint64	heap_objects;	// total number of allocated objects

	// Statistics about allocation of low-level fixed-size structures.
	// Protected by FixAlloc locks.
	uint64	stacks_inuse;	// bootstrap stacks
	uint64	stacks_sys;
	uint64	mspan_inuse;	// MSpan structures
	uint64	mspan_sys;
	uint64	mcache_inuse;	// MCache structures
	uint64	mcache_sys;
	uint64	buckhash_sys;	// profiling bucket hash table
	uint64	gc_sys;
	uint64	other_sys;

	// Statistics about garbage collector.
	// Protected by mheap or stopping the world during GC.
	uint64	next_gc;	// next GC (in heap_alloc time)
	uint64	last_gc;	// last GC (in absolute time)
	uint64	pause_total_ns;
	uint64	pause_ns[256];
	uint32	numgc;
	bool	enablegc;
	bool	debuggc;

	// Statistics about allocation size classes.
	struct {
		uint32 size;
		uint64 nmalloc;
		uint64 nfree;
	} by_size[NumSizeClasses];
};

extern MStats mstats
  __asm__ (GOSYM_PREFIX "runtime.memStats");

// Size classes.  Computed and initialized by InitSizes.
//
// SizeToClass(0 <= n <= MaxSmallSize) returns the size class,
//	1 <= sizeclass < NumSizeClasses, for n.
//	Size class 0 is reserved to mean "not small".
//
// class_to_size[i] = largest size in class i
// class_to_allocnpages[i] = number of pages to allocate when
//	making new objects in class i

int32	runtime_SizeToClass(int32);
uintptr	runtime_roundupsize(uintptr);
extern	int32	runtime_class_to_size[NumSizeClasses];
extern	int32	runtime_class_to_allocnpages[NumSizeClasses];
extern	int8	runtime_size_to_class8[1024/8 + 1];
extern	int8	runtime_size_to_class128[(MaxSmallSize-1024)/128 + 1];
extern	void	runtime_InitSizes(void);
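
// Editor's sketch of how the two lookup tables divide the small-size range
// (8-byte granularity up to 1024 bytes, 128-byte granularity above that);
// this mirrors SizeToClass in msize.c:
//
//	if(size <= 1024-8)
//		sizeclass = runtime_size_to_class8[(size+7) >> 3];
//	else
//		sizeclass = runtime_size_to_class128[(size-1024+127) >> 7];
//	size = runtime_class_to_size[sizeclass];  // rounded-up allocation size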

// Per-thread (in Go, per-M) cache for small objects.
// No locking needed because it is per-thread (per-M).
typedef struct MCacheList MCacheList;
struct MCacheList
{
	MLink *list;
	uint32 nlist;
};

struct MCache
{
	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	int32 next_sample;		// trigger heap sample after allocating this many bytes
	intptr local_cachealloc;	// bytes allocated (or freed) from cache since last lock of heap
	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.goc.
	byte*	tiny;
	uintptr	tinysize;
	// The rest is not accessed on every malloc.
	MCacheList list[NumSizeClasses];
	// Local allocator stats, flushed during GC.
	uintptr local_nlookup;		// number of pointer lookups
	uintptr local_largefree;	// bytes freed for large objects (>MaxSmallSize)
	uintptr local_nlargefree;	// number of frees for large objects (>MaxSmallSize)
	uintptr local_nsmallfree[NumSizeClasses];	// number of frees for small objects (<=MaxSmallSize)
};

void	runtime_MCache_Refill(MCache *c, int32 sizeclass);
void	runtime_MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size);
void	runtime_MCache_ReleaseAll(MCache *c);

// MTypes describes the types of blocks allocated within a span.
// The compression field describes the layout of the data.
//
// MTypes_Empty:
//     All blocks are free, or no type information is available for
//     allocated blocks.
//     The data field has no meaning.
// MTypes_Single:
//     The span contains just one block.
//     The data field holds the type information.
//     The sysalloc field has no meaning.
// MTypes_Words:
//     The span contains multiple blocks.
//     The data field points to an array of type [NumBlocks]uintptr,
//     and each element of the array holds the type of the corresponding
//     block.
// MTypes_Bytes:
//     The span contains at most seven different types of blocks.
//     The data field points to the following structure:
//         struct {
//             type  [8]uintptr       // type[0] is always 0
//             index [NumBlocks]byte
//         }
//     The type of the i-th block is: data.type[data.index[i]]
enum
{
	MTypes_Empty = 0,
	MTypes_Single = 1,
	MTypes_Words = 2,
	MTypes_Bytes = 3,
};
struct MTypes
{
	byte	compression;	// one of MTypes_*
	uintptr	data;
};
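
// Editor's sketch of decoding the type of block i in span s, following the
// compression kinds described above (illustrative only):
//
//	switch(s->types.compression) {
//	case MTypes_Empty:  t = 0; break;
//	case MTypes_Single: t = s->types.data; break;
//	case MTypes_Words:  t = ((uintptr*)s->types.data)[i]; break;
//	case MTypes_Bytes:
//		type = (uintptr*)s->types.data;  // [8]uintptr, type[0] == 0
//		index = (byte*)(type + 8);       // [NumBlocks]byte
//		t = type[index[i]];
//		break;
//	}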

enum
{
	KindSpecialFinalizer = 1,
	KindSpecialProfile = 2,
	// Note: The finalizer special must be first because if we're freeing
	// an object, a finalizer special will cause the freeing operation
	// to abort, and we want to keep the other special records around
	// if that happens.
};

typedef struct Special Special;
struct Special
{
	Special*	next;	// linked list in span
	uint16		offset;	// span offset of object
	byte		kind;	// kind of Special
};

// The described object has a finalizer set for it.
typedef struct SpecialFinalizer SpecialFinalizer;
struct SpecialFinalizer
{
	Special;
	FuncVal*	fn;
	const struct __go_func_type*	ft;
	const struct __go_ptr_type*	ot;
};

// The described object is being heap profiled.
typedef struct Bucket Bucket;	// from mprof.goc
typedef struct SpecialProfile SpecialProfile;
struct SpecialProfile
{
	Special;
	Bucket*	b;
};

// An MSpan is a run of pages.
enum
{
	MSpanInUse = 0,
	MSpanFree,
	MSpanListHead,
	MSpanDead,
};
struct MSpan
{
	MSpan	*next;		// in a span linked list
	MSpan	*prev;		// in a span linked list
	PageID	start;		// starting page number
	uintptr	npages;		// number of pages in span
	MLink	*freelist;	// list of free objects
	// sweep generation (usage sketch after the MSpan declarations below):
	// if sweepgen == h->sweepgen - 2, the span needs sweeping
	// if sweepgen == h->sweepgen - 1, the span is currently being swept
	// if sweepgen == h->sweepgen, the span is swept and ready to use
	// h->sweepgen is incremented by 2 after every GC
	uint32	sweepgen;
	uint16	ref;		// number of allocated objects in this span
	uint8	sizeclass;	// size class
	uint8	state;		// MSpanInUse etc
	uint8	needzero;	// needs to be zeroed before allocation
	uintptr	elemsize;	// computed from sizeclass or from npages
	int64	unusedsince;	// First time spotted by GC in MSpanFree state
	uintptr	npreleased;	// number of pages released to the OS
	byte	*limit;		// end of data in span
	MTypes	types;		// types of allocated objects in this span
	Lock	specialLock;	// TODO: use to protect types also (instead of settype_lock)
	Special	*specials;	// linked list of special records sorted by offset.
};

void	runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages);
void	runtime_MSpan_EnsureSwept(MSpan *span);
bool	runtime_MSpan_Sweep(MSpan *span);
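
// Editor's sketch of the sweepgen protocol described in MSpan above: a span
// must be swept before use, and a compare-and-swap decides who sweeps it
// (illustrative; runtime_cas is the runtime's compare-and-swap on uint32):
//
//	sg = runtime_mheap.sweepgen;
//	if(s->sweepgen != sg) {
//		if(runtime_cas(&s->sweepgen, sg-2, sg-1))
//			runtime_MSpan_Sweep(s);        // we claimed the sweep
//		else
//			runtime_MSpan_EnsureSwept(s);  // wait for the concurrent sweeper
//	}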

// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
// MCentral's span lists.  We use empty MSpan structures as list heads.
void	runtime_MSpanList_Init(MSpan *list);
bool	runtime_MSpanList_IsEmpty(MSpan *list);
void	runtime_MSpanList_Insert(MSpan *list, MSpan *span);
void	runtime_MSpanList_InsertBack(MSpan *list, MSpan *span);
void	runtime_MSpanList_Remove(MSpan *span);	// from whatever list it is in
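
// Editor's note: because list heads are empty MSpan structures, the lists
// are circular, and a typical traversal looks like:
//
//	for(s = list->next; s != list; s = s->next)
//		...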

// Central list of free objects of a given size.
struct MCentral
{
	Lock;
	int32 sizeclass;
	MSpan nonempty;	// list of spans with a free object
	MSpan empty;	// list of spans with no free objects (or cached in an MCache)
	int32 nfree;	// # of objects available in nonempty spans
};

void	runtime_MCentral_Init(MCentral *c, int32 sizeclass);
int32	runtime_MCentral_AllocList(MCentral *c, MLink **first);
void	runtime_MCentral_FreeList(MCentral *c, MLink *first);
bool	runtime_MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end);
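
// Editor's sketch of step 2 of allocation: an MCache list is refilled by
// moving a batch of objects from the matching MCentral in the heap's
// central array (declared below); close to what MCache_Refill does:
//
//	l = &c->list[sizeclass];
//	l->nlist = runtime_MCentral_AllocList(&runtime_mheap.central[sizeclass], &l->list);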

// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
// but all the other global data is here too.
struct MHeap
{
	Lock;
	MSpan free[MaxMHeapList];	// free lists of given length
	MSpan freelarge;		// free lists length >= MaxMHeapList
	MSpan busy[MaxMHeapList];	// busy lists of large objects of given length
	MSpan busylarge;		// busy lists of large objects length >= MaxMHeapList
	MSpan **allspans;		// all spans out there
	MSpan **sweepspans;		// copy of allspans referenced by sweeper
	uint32	nspan;
	uint32	nspancap;
	uint32	sweepgen;		// sweep generation, see comment in MSpan
	uint32	sweepdone;		// all spans are swept

	// span lookup
	MSpan**	spans;
	uintptr	spans_mapped;

	// range of addresses we might see in the heap
	byte *bitmap;
	uintptr bitmap_mapped;
	byte *arena_start;
	byte *arena_used;
	byte *arena_end;

	// central free lists for small size classes.
	// the padding makes sure that the MCentrals are
	// spaced CacheLineSize bytes apart, so that each MCentral.Lock
	// gets its own cache line.
	struct {
		MCentral;
		byte pad[CacheLineSize];
	} central[NumSizeClasses];

	FixAlloc spanalloc;	// allocator for Span*
	FixAlloc cachealloc;	// allocator for MCache*
	FixAlloc specialfinalizeralloc;	// allocator for SpecialFinalizer*
	FixAlloc specialprofilealloc;	// allocator for SpecialProfile*
	Lock speciallock;	// lock for special record allocators.

	// Malloc stats.
	uint64 largefree;	// bytes freed for large objects (>MaxSmallSize)
	uint64 nlargefree;	// number of frees for large objects (>MaxSmallSize)
	uint64 nsmallfree[NumSizeClasses];	// number of frees for small objects (<=MaxSmallSize)
};

extern MHeap runtime_mheap;

void	runtime_MHeap_Init(MHeap *h);
MSpan*	runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero);
void	runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct);
MSpan*	runtime_MHeap_Lookup(MHeap *h, void *v);
MSpan*	runtime_MHeap_LookupMaybe(MHeap *h, void *v);
void	runtime_MGetSizeClassInfo(int32 sizeclass, uintptr *size, int32 *npages, int32 *nobj);
void*	runtime_MHeap_SysAlloc(MHeap *h, uintptr n);
void	runtime_MHeap_MapBits(MHeap *h);
void	runtime_MHeap_MapSpans(MHeap *h);
void	runtime_MHeap_Scavenger(void*);
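
// Editor's sketch of a large allocation (> MaxSmallSize), which goes
// straight to the page heap as described at the top of this file (close
// to the code in malloc.goc):
//
//	npages = size >> PageShift;
//	if((size & PageMask) != 0)
//		npages++;
//	s = runtime_MHeap_Alloc(&runtime_mheap, npages, 0, 1, !(flag & FlagNoZero));
//	v = (void*)(s->start << PageShift);  // sizeclass 0 means "not small"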

void*	runtime_mallocgc(uintptr size, uintptr typ, uint32 flag);
void*	runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat);
int32	runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **s);
void	runtime_gc(int32 force);
uintptr	runtime_sweepone(void);
void	runtime_markscan(void *v);
void	runtime_marknogc(void *v);
void	runtime_checkallocated(void *v, uintptr n);
void	runtime_markfreed(void *v, uintptr n);
void	runtime_checkfreed(void *v, uintptr n);
extern	int32	runtime_checking;
void	runtime_markspan(void *v, uintptr size, uintptr n, bool leftover);
void	runtime_unmarkspan(void *v, uintptr size);
void	runtime_purgecachedstats(MCache*);
void*	runtime_cnew(const Type*);
void*	runtime_cnewarray(const Type*, intgo);

void	runtime_settype_flush(M*);
void	runtime_settype_sysfree(MSpan*);
uintptr	runtime_gettype(void*);

// flags to malloc
enum
{
	FlagNoScan	= 1<<0,	// GC doesn't have to scan object
	FlagNoProfiling	= 1<<1,	// must not profile
	FlagNoGC	= 1<<2,	// must not free or scan for pointers
	FlagNoZero	= 1<<3,	// don't zero memory
	FlagNoInvokeGC	= 1<<4,	// don't invoke GC
};

typedef struct Obj Obj;
struct Obj
{
	byte	*p;	// data pointer
	uintptr	n;	// size of data in bytes
	uintptr	ti;	// type info
};

void	runtime_MProf_Malloc(void*, uintptr, uintptr);
void	runtime_MProf_Free(Bucket*, void*, uintptr, bool);
void	runtime_MProf_GC(void);
void	runtime_MProf_TraceGC(void);

void	runtime_MProf_Mark(struct Workbuf**, void (*)(struct Workbuf**, Obj));
int32	runtime_gcprocs(void);
void	runtime_helpgc(int32 nproc);
void	runtime_gchelper(void);

void	runtime_setprofilebucket(void *p, Bucket *b);

struct __go_func_type;
struct __go_ptr_type;
bool	runtime_addfinalizer(void *p, FuncVal *fn, const struct __go_func_type*, const struct __go_ptr_type*);
void	runtime_removefinalizer(void*);
void	runtime_queuefinalizer(void *p, FuncVal *fn, const struct __go_func_type *ft, const struct __go_ptr_type *ot);

void	runtime_freeallspecials(MSpan *span, void *p, uintptr size);
bool	runtime_freespecial(Special *s, void *p, uintptr size, bool freed);

enum
{
	TypeInfo_SingleObject = 0,
	TypeInfo_Array = 1,
	TypeInfo_Chan = 2,

	// Enables type information at the end of blocks allocated from heap
	DebugTypeAtBlockEnd = 0,
};

// defined in mgc0.go
void	runtime_gc_m_ptr(Eface*);
void	runtime_gc_itab_ptr(Eface*);

void	runtime_memorydump(void);
int32	runtime_setgcpercent(int32);

void	runtime_proc_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void	runtime_time_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void	runtime_netpoll_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));