// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator, based on tcmalloc.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes (NumSizeClasses below),
// each of which has its own free list of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using free list
// allocators.

// The allocator's data structures are:

//	FixAlloc: a free-list allocator for fixed-size objects,
//		used to manage storage used by the allocator.
//	MHeap: the malloc heap, managed at page (4096-byte) granularity.
//	MSpan: a run of pages managed by the MHeap.
//	MHeapMap: a mapping from page IDs to MSpans.
//	MCentral: a shared free list for a given size class.
//	MCache: a per-thread (in Go, per-M) cache for small objects.
//	MStats: allocation statistics.

// Allocating a small object proceeds up a hierarchy of caches
// (a commented sketch of this path follows the list):

// 1. Round the size up to one of the small size classes
// and look in the corresponding MCache free list.
// If the list is not empty, allocate an object from it.
// This can all be done without acquiring a lock.

// 2. If the MCache free list is empty, replenish it by
// taking a bunch of objects from the MCentral free list.
// Moving a bunch amortizes the cost of acquiring the MCentral lock.

// 3. If the MCentral free list is empty, replenish it by
// allocating a run of pages from the MHeap and then
// chopping that memory into objects of the given size.
// Allocating many objects amortizes the cost of locking
// the heap.

// 4. If the MHeap is empty or has no page runs large enough,
// allocate a new group of pages (at least 1MB) from the
// operating system. Allocating a large run of pages
// amortizes the cost of talking to the operating system.
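
// A minimal commented sketch of that allocation path, using the
// declarations from this header plus a hypothetical helper name.
// Steps 2-4 happen inside runtime_MCache_Alloc: it refills from the
// MCentral, which refills from the MHeap, which grows by asking the
// operating system for pages:
//
//	void*
//	example_alloc_small(MCache *c, uintptr size)
//	{
//		int32 sizeclass = runtime_SizeToClass((int32)size);
//		return runtime_MCache_Alloc(c, sizeclass,
//			runtime_class_to_size[sizeclass], 1 /* zeroed */);
//	}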

// Freeing a small object proceeds up the same hierarchy
// (a commented sketch follows the list):

// 1. Look up the size class for the object and add it to
// the MCache free list.

// 2. If the MCache free list is too long or the MCache has
// too much memory, return some to the MCentral free lists.

// 3. If all the objects in a given span have returned to
// the MCentral list, return that span to the page heap.

// 4. If the heap has too much memory, return some to the
// operating system.

// TODO(rsc): Step 4 is not implemented.
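
// A minimal commented sketch of the free path (the function name is
// hypothetical; runtime_mlookup and runtime_MCache_Free are declared
// later in this header):
//
//	void
//	example_free_small(MCache *c, void *p)
//	{
//		MSpan *s;
//		uintptr size;
//		byte *base;
//		uint32 *ref;
//
//		// Find the containing span to recover base and size class.
//		if(!runtime_mlookup(p, &base, &size, &s, &ref))
//			return;	// not a heap pointer
//		// Steps 2 and 3 happen inside the MCache/MCentral code
//		// when the cached list grows too long.
//		runtime_MCache_Free(c, base, (int32)s->sizeclass, size);
//	}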

// Allocating and freeing a large object uses the page heap
// directly, bypassing the MCache and MCentral free lists.

// The small objects on the MCache and MCentral free lists
// may or may not be zeroed. They are zeroed if and only if
// the second word of the object is zero. The spans in the
// page heap are always zeroed. When a span full of objects
// is returned to the page heap, the objects that need zeroing
// are zeroed first. There are two main benefits to delaying the
// zeroing this way:

// 1. stack frames allocated from the small object lists
// can avoid zeroing altogether.
// 2. the cost of zeroing when reusing a small object is
// charged to the mutator, not the garbage collector.
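
// A minimal commented sketch of that zeroing convention (the function
// name is hypothetical; the "second word" is the word after the MLink
// next pointer that threads the free list):
//
//	bool
//	example_needs_zeroing(void *p)
//	{
//		// A zero second word marks the object as already zeroed;
//		// anything else may be stale data from a previous use.
//		return ((uintptr*)p)[1] != 0;
//	}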

// This C code was written with an eye toward translating to Go
// in the future. Methods have the form Type_Method(Type *t, ...).

typedef struct FixAlloc	FixAlloc;
typedef struct MCentral	MCentral;
typedef struct MHeap	MHeap;
typedef struct MHeapMap	MHeapMap;
typedef struct MSpan	MSpan;
typedef struct MStats	MStats;
typedef struct MLink	MLink;

enum
{
	PageShift	= 12,
	PageSize	= 1<<PageShift,
	PageMask	= PageSize - 1,
};
typedef	uintptr	PageID;		// address >> PageShift
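
// A minimal commented sketch of the page arithmetic these constants
// support (both helper names are hypothetical):
//
//	PageID
//	example_page_id(void *v)
//	{
//		return (uintptr)v >> PageShift;
//	}
//
//	uintptr
//	example_round_up_to_page(uintptr n)
//	{
//		return (n + PageMask) & ~(uintptr)PageMask;
//	}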

enum
{
	// Tunable constants.
	NumSizeClasses = 67,		// Number of size classes (must match msize.c)
	MaxSmallSize = 32<<10,

	FixAllocChunk = 128<<10,	// Chunk size for FixAlloc
	MaxMCacheListLen = 256,		// Maximum objects on MCacheList
	MaxMCacheSize = 2<<20,		// Maximum bytes in one MCache
	MaxMHeapList = 1<<(20 - PageShift),	// Maximum page length for fixed-size list in MHeap.
	HeapAllocChunk = 1<<20,		// Chunk size for heap growth
};

#if __SIZEOF_POINTER__ == 8
#include "mheapmap64.h"
#else
#include "mheapmap32.h"
#endif

// A generic linked list of blocks.  (Typically the block is bigger than sizeof(MLink).)
struct MLink
{
	MLink *next;
};
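
// A minimal commented sketch of MLink use: the first word of each free
// block is overwritten with the next pointer, so no separate node
// storage is needed (both function names are hypothetical):
//
//	void
//	example_push(MLink **list, void *block)
//	{
//		MLink *l = (MLink*)block;
//		l->next = *list;
//		*list = l;
//	}
//
//	void*
//	example_pop(MLink **list)
//	{
//		MLink *l = *list;
//		if(l != nil)
//			*list = l->next;
//		return l;
//	}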

// SysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.

// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes. The program reserves the right to start
// accessing those pages in the future.

// SysFree returns the memory to the operating system unconditionally;
// it is only used if an out-of-memory error has been detected midway
// through an allocation. It is okay if SysFree is a no-op.

void*	runtime_SysAlloc(uintptr nbytes);
void	runtime_SysFree(void *v, uintptr nbytes);
void	runtime_SysUnused(void *v, uintptr nbytes);
void	runtime_SysMemInit(void);
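
// A minimal commented sketch of one plausible Unix implementation of
// SysAlloc and SysFree via anonymous mmap/munmap, assuming MAP_ANON is
// available (illustrative only; the real implementations are
// platform-specific):
//
//	#include <sys/mman.h>
//
//	void*
//	example_SysAlloc(uintptr nbytes)
//	{
//		// Anonymous mappings are zeroed by the kernel, which
//		// satisfies SysAlloc's contract.
//		void *v = mmap(nil, nbytes, PROT_READ|PROT_WRITE,
//			MAP_ANON|MAP_PRIVATE, -1, 0);
//		return v == MAP_FAILED ? nil : v;
//	}
//
//	void
//	example_SysFree(void *v, uintptr nbytes)
//	{
//		munmap(v, nbytes);
//	}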

// FixAlloc is a simple free-list allocator for fixed size objects.
// Malloc uses a FixAlloc wrapped around SysAlloc to manage its
// MCache and MSpan objects.

// Memory returned by FixAlloc_Alloc is not zeroed.
// The caller is responsible for locking around FixAlloc calls.
// Callers can keep state in the object but the first word is
// smashed by freeing and reallocating.
struct FixAlloc
{
	uintptr size;
	void *(*alloc)(uintptr);
	void (*first)(void *arg, byte *p);	// called first time p is returned
	void *arg;
	MLink *list;
	byte *chunk;
	uint32 nchunk;
	uintptr inuse;	// in-use bytes now
	uintptr sys;	// bytes obtained from system
};

void	runtime_FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg);
void*	runtime_FixAlloc_Alloc(FixAlloc *f);
void	runtime_FixAlloc_Free(FixAlloc *f, void *p);
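
// A minimal commented sketch of typical FixAlloc use, in the spirit of
// how the heap allocates MSpan structures (the variable and function
// names are hypothetical; no "first" callback or argument is used here):
//
//	FixAlloc spanalloc;
//
//	void
//	example_init(void)
//	{
//		// Back the fixed-size allocator with SysAlloc.
//		runtime_FixAlloc_Init(&spanalloc, sizeof(MSpan),
//			runtime_SysAlloc, nil, nil);
//	}
//
//	MSpan*
//	example_new_span(void)
//	{
//		// Caller must hold the appropriate lock; memory is not zeroed.
//		return (MSpan*)runtime_FixAlloc_Alloc(&spanalloc);
//	}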

// Statistics.
// Shared with Go: if you edit this structure, also edit extern.go.
struct MStats
{
	// General statistics.  No locking; approximate.
	uint64	alloc;		// bytes allocated and still in use
	uint64	total_alloc;	// bytes allocated (even if freed)
	uint64	sys;		// bytes obtained from system (should be sum of xxx_sys below)
	uint64	nlookup;	// number of pointer lookups
	uint64	nmalloc;	// number of mallocs
	uint64	nfree;		// number of frees

	// Statistics about malloc heap.
	// Protected by mheap.Lock.
	uint64	heap_alloc;	// bytes allocated and still in use
	uint64	heap_sys;	// bytes obtained from system
	uint64	heap_idle;	// bytes in idle spans
	uint64	heap_inuse;	// bytes in non-idle spans
	uint64	heap_objects;	// total number of allocated objects

	// Statistics about allocation of low-level fixed-size structures.
	// Protected by FixAlloc locks.
	uint64	stacks_inuse;	// bootstrap stacks
	uint64	stacks_sys;
	uint64	mspan_inuse;	// MSpan structures
	uint64	mspan_sys;
	uint64	mcache_inuse;	// MCache structures
	uint64	mcache_sys;
	uint64	heapmap_sys;	// heap map
	uint64	buckhash_sys;	// profiling bucket hash table

	// Statistics about garbage collector.
	// Protected by stopping the world during GC.
	uint64	next_gc;	// next GC (in heap_alloc time)
	uint64	pause_total_ns;
	uint64	pause_ns[256];
	uint32	numgc;
	bool	enablegc;
	bool	debuggc;

	// Statistics about allocation size classes.
	// No locking; approximate.
	struct {
		uint32 size;
		uint64 nmalloc;
		uint64 nfree;
	} by_size[NumSizeClasses];
};

extern MStats mstats
  __asm__ ("libgo_runtime.runtime.MemStats");

// Size classes.  Computed and initialized by InitSizes.

// SizeToClass(0 <= n <= MaxSmallSize) returns the size class,
// 1 <= sizeclass < NumSizeClasses, for n.
// Size class 0 is reserved to mean "not small".

// class_to_size[i] = largest size in class i
// class_to_allocnpages[i] = number of pages to allocate when
// making new objects in class i
// class_to_transfercount[i] = number of objects to move when
// taking a bunch of objects out of the central lists
// and putting them in the thread free list.

int32	runtime_SizeToClass(int32);
extern	int32	runtime_class_to_size[NumSizeClasses];
extern	int32	runtime_class_to_allocnpages[NumSizeClasses];
extern	int32	runtime_class_to_transfercount[NumSizeClasses];
extern	void	runtime_InitSizes(void);
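
// A minimal commented sketch of the size-class round trip (the function
// name is hypothetical):
//
//	int32
//	example_rounded_size(int32 n)
//	{
//		int32 sizeclass;
//
//		if(n > MaxSmallSize)
//			return -1;	// "not small": goes straight to the page heap
//		sizeclass = runtime_SizeToClass(n);
//		// class_to_size[sizeclass] >= n, with bounded waste.
//		return runtime_class_to_size[sizeclass];
//	}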

// Per-thread (in Go, per-M) cache for small objects.
// No locking needed because it is per-thread (per-M).
typedef struct MCacheList MCacheList;
struct MCacheList
{
	MLink *list;
	uint32 nlist;
	uint32 nlistmin;
};

struct MCache
{
	MCacheList list[NumSizeClasses];
	uint64 size;
	int64 local_alloc;	// bytes allocated (or freed) since last lock of heap
	int64 local_objects;	// objects allocated (or freed) since last lock of heap
	int32 next_sample;	// trigger heap sample after allocating this many bytes
};

void*	runtime_MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed);
void	runtime_MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size);
void	runtime_MCache_ReleaseAll(MCache *c);

// An MSpan is a run of pages.
enum
{
	MSpanInUse = 0,
	MSpanFree,
	MSpanListHead,
	MSpanDead,
};
struct MSpan
{
	MSpan	*next;		// in a span linked list
	MSpan	*prev;		// in a span linked list
	MSpan	*allnext;	// in the list of all spans
	PageID	start;		// starting page number
	uintptr	npages;		// number of pages in span
	MLink	*freelist;	// list of free objects
	uint32	ref;		// number of allocated objects in this span
	uint32	sizeclass;	// size class
	uint32	state;		// MSpanInUse etc
	union {
		uint32	*gcref;	// sizeclass > 0
		uint32	gcref0;	// sizeclass == 0
	};
};

void	runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages);

// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
// MCentral's span lists.  We use empty MSpan structures as list heads.
void	runtime_MSpanList_Init(MSpan *list);
bool	runtime_MSpanList_IsEmpty(MSpan *list);
void	runtime_MSpanList_Insert(MSpan *list, MSpan *span);
void	runtime_MSpanList_Remove(MSpan *span);	// from whatever list it is in
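
// A minimal commented sketch of walking such a list: the head is a
// dummy MSpan (state MSpanListHead) and the list is circular, so
// iteration stops when the cursor returns to the head (the function
// name is hypothetical):
//
//	uintptr
//	example_count_pages(MSpan *list)
//	{
//		MSpan *s;
//		uintptr n;
//
//		n = 0;
//		for(s = list->next; s != list; s = s->next)
//			n += s->npages;
//		return n;
//	}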

// Central list of free objects of a given size.
struct MCentral
{
	Lock;
	int32 sizeclass;
	MSpan nonempty;
	MSpan empty;
	int32 nfree;
};

void	runtime_MCentral_Init(MCentral *c, int32 sizeclass);
int32	runtime_MCentral_AllocList(MCentral *c, int32 n, MLink **first);
void	runtime_MCentral_FreeList(MCentral *c, int32 n, MLink *first);
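
// A minimal commented sketch of an MCache refilling from an MCentral,
// using the transfer counts above (the function name is hypothetical,
// the cache list is assumed empty, and AllocList is assumed here to
// report the number of objects obtained):
//
//	void
//	example_refill(MCache *c, MCentral *central, int32 sizeclass)
//	{
//		MLink *first;
//		int32 n;
//
//		// Grab a whole batch under one MCentral lock acquisition.
//		n = runtime_MCentral_AllocList(central,
//			runtime_class_to_transfercount[sizeclass], &first);
//		c->list[sizeclass].list = first;
//		c->list[sizeclass].nlist = n;
//	}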

// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
// but all the other global data is here too.
struct MHeap
{
	Lock;
	MSpan free[MaxMHeapList];	// free lists of given length
	MSpan large;			// free lists length >= MaxMHeapList
	MSpan *allspans;

	// span lookup
	MHeapMap map;

	// range of addresses we might see in the heap
	byte *min;
	byte *max;

	// central free lists for small size classes.
	// the union makes sure that the MCentrals are
	// spaced 64 bytes apart, so that each MCentral.Lock
	// gets its own cache line.
	union {
		MCentral;
		byte pad[64];
	} central[NumSizeClasses];

	FixAlloc spanalloc;	// allocator for MSpan*
	FixAlloc cachealloc;	// allocator for MCache*
};
extern MHeap runtime_mheap;

void	runtime_MHeap_Init(MHeap *h, void *(*allocator)(uintptr));
MSpan*	runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct);
void	runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct);
MSpan*	runtime_MHeap_Lookup(MHeap *h, PageID p);
MSpan*	runtime_MHeap_LookupMaybe(MHeap *h, PageID p);
void	runtime_MGetSizeClassInfo(int32 sizeclass, int32 *size, int32 *npages, int32 *nobj);

void*	runtime_mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
int32	runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **s, uint32 **ref);
void	runtime_gc(int32 force);

void*	runtime_SysAlloc(uintptr);
void	runtime_SysUnused(void*, uintptr);
void	runtime_SysFree(void*, uintptr);

enum
{
	RefcountOverhead = 4,	// one uint32 per object

	RefFree = 0,	// must be zero
	RefStack,	// stack segment - don't free and don't scan for pointers
	RefNone,	// no references
	RefSome,	// some references
	RefNoPointers = 0x80000000U,	// flag - no pointers here
	RefHasFinalizer = 0x40000000U,	// flag - has finalizer
	RefProfiled = 0x20000000U,	// flag - is in profiling table
	RefNoProfiling = 0x10000000U,	// flag - must not profile
	RefFlags = 0xFFFF0000U,
};
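
// A minimal commented sketch of how these bits combine: the low bits
// hold the reference state, the high bits are independent flags, so
// the flags must be masked off before comparing states (the function
// name is hypothetical):
//
//	bool
//	example_is_unreferenced(uint32 ref)
//	{
//		return (ref & ~RefFlags) == RefNone;
//	}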

void	runtime_Mprof_Init(void);
void	runtime_MProf_Malloc(void*, uintptr);
void	runtime_MProf_Free(void*, uintptr);
void	runtime_MProf_Mark(void (*scan)(byte *, int64));

// Malloc profiling settings.
// Must match definition in extern.go.
enum {
	MProf_None = 0,
	MProf_Sample = 1,
	MProf_All = 2,
};
extern int32 runtime_malloc_profile;

typedef struct Finalizer Finalizer;
struct Finalizer
{
	Finalizer *next;	// for use by caller of getfinalizer
	void (*fn)(void*);
	void *arg;
	const struct __go_func_type *ft;
};

Finalizer*	runtime_getfinalizer(void*, bool);