// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator, based on tcmalloc.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free list of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using free list
// allocators.
// The allocator's data structures are:
//
//	FixAlloc: a free-list allocator for fixed-size objects,
//		used to manage storage used by the allocator.
//	MHeap: the malloc heap, managed at page (4096-byte) granularity.
//	MSpan: a run of pages managed by the MHeap.
//	MHeapMap: a mapping from page IDs to MSpans.
//	MCentral: a shared free list for a given size class.
//	MCache: a per-thread (in Go, per-M) cache for small objects.
//	MStats: allocation statistics.
// Allocating a small object proceeds up a hierarchy of caches
// (a sketch of the fast path follows this list):
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding MCache free list.
//	   If the list is not empty, allocate an object from it.
//	   This can all be done without acquiring a lock.
//
//	2. If the MCache free list is empty, replenish it by
//	   taking a bunch of objects from the MCentral free list.
//	   Moving a bunch amortizes the cost of acquiring the MCentral lock.
//
//	3. If the MCentral free list is empty, replenish it by
//	   allocating a run of pages from the MHeap and then
//	   chopping that memory into objects of the given size.
//	   Allocating many objects amortizes the cost of locking
//	   the heap.
//
//	4. If the MHeap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
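//
// As a rough illustration of the fast path (step 1), using declarations
// that appear later in this file; the variable names and the direct use
// of an MCache pointer are assumptions of this sketch, not how the
// allocator proper is written:
//
//	int32 sizeclass = runtime_SizeToClass(size);
//	void *v = runtime_MCache_Alloc(mcache, sizeclass,
//		runtime_class_to_size[sizeclass], zeroed);
//	// (MCache_Alloc itself replenishes from MCentral as needed.)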
// Freeing a small object proceeds up the same hierarchy:
//
//	1. Look up the size class for the object and add it to
//	   the MCache free list (see the sketch after this list).
//
//	2. If the MCache free list is too long or the MCache has
//	   too much memory, return some to the MCentral free lists.
//
//	3. If all the objects in a given span have returned to
//	   the MCentral list, return that span to the page heap.
//
//	4. If the heap has too much memory, return some to the
//	   operating system.
//
// TODO(rsc): Step 4 is not implemented.
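//
// A matching sketch of the small-object free path (step 1), again using
// declarations from later in this file; passing nil for the unused
// out-parameters of runtime_mlookup is an assumption of this sketch:
//
//	uintptr size;
//	MSpan *s;
//	runtime_mlookup(v, nil, &size, &s, nil);
//	runtime_MCache_Free(mcache, v, s->sizeclass, size);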
// Allocating and freeing a large object uses the page heap
// directly, bypassing the MCache and MCentral free lists.
//
// The small objects on the MCache and MCentral free lists
// may or may not be zeroed. They are zeroed if and only if
// the second word of the object is zero. The spans in the
// page heap are always zeroed. When a span full of objects
// is returned to the page heap, the objects that need to be
// zeroed are zeroed first. There are two main benefits to
// delaying the zeroing this way:
//
//	1. stack frames allocated from the small object lists
//	   can avoid zeroing altogether.
//	2. the cost of zeroing when reusing a small object is
//	   charged to the mutator, not the garbage collector.
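//
// Concretely (an illustrative test, not an API declared in this file):
// a free-list link uses only the first word of an object, so whether an
// object is still zeroed can be read off its second word:
//
//	if(((uintptr*)v)[1] == 0)
//		;	// v is already zeroed beyond the link word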
// This C code was written with an eye toward translating to Go
// in the future. Methods have the form Type_Method(Type *t, ...).

typedef struct FixAlloc	FixAlloc;
typedef struct MCentral	MCentral;
typedef struct MHeap	MHeap;
typedef struct MHeapMap	MHeapMap;
typedef struct MSpan	MSpan;
typedef struct MStats	MStats;
typedef struct MLink	MLink;
enum
{
	PageShift	= 12,
	PageSize	= 1<<PageShift,
	PageMask	= PageSize - 1,
};
typedef	uintptr	PageID;		// address >> PageShift
enum
{
	// Tunable constants.
	NumSizeClasses = 67,		// Number of size classes (must match msize.c)
	MaxSmallSize = 32<<10,

	FixAllocChunk = 128<<10,	// Chunk size for FixAlloc
	MaxMCacheListLen = 256,		// Maximum objects on MCacheList
	MaxMCacheSize = 2<<20,		// Maximum bytes in one MCache
	MaxMHeapList = 1<<(20 - PageShift),	// Maximum page length for fixed-size list in MHeap.
	HeapAllocChunk = 1<<20,		// Chunk size for heap growth
};
#if __SIZEOF_POINTER__ == 8
#include "mheapmap64.h"
#else
#include "mheapmap32.h"
#endif
// A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).)
struct MLink
{
	MLink *next;
};
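
// Illustrative only (no such helpers are declared here): a block is
// threaded onto a free list through its first word, so pushing and
// popping look like this:
//
//	((MLink*)block)->next = list;	// push
//	list = (MLink*)block;
//
//	block = list;			// pop
//	list = list->next;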
// SysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes. The program reserves the right to start
// accessing those pages in the future.
//
// SysFree returns the memory to the operating system unconditionally;
// it is only used if an out-of-memory error has been detected midway
// through an allocation. It is okay if SysFree is a no-op.

void*	runtime_SysAlloc(uintptr nbytes);
void	runtime_SysFree(void *v, uintptr nbytes);
void	runtime_SysUnused(void *v, uintptr nbytes);
void	runtime_SysMemInit(void);
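
// Illustrative pairing of the calls above (the chunk size is an example
// only):
//
//	void *v = runtime_SysAlloc(1<<20);	// zeroed 1MB chunk
//	runtime_SysUnused(v, 1<<20);		// contents no longer needed
//	runtime_SysFree(v, 1<<20);		// only on an out-of-memory unwind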
// FixAlloc is a simple free-list allocator for fixed size objects.
// Malloc uses a FixAlloc wrapped around SysAlloc to manage its
// MCache and MSpan objects.
//
// Memory returned by FixAlloc_Alloc is not zeroed.
// The caller is responsible for locking around FixAlloc calls.
// Callers can keep state in the object but the first word is
// smashed by freeing and reallocating.
struct FixAlloc
{
	uintptr size;
	void *(*alloc)(uintptr);
	void (*first)(void *arg, byte *p);	// called first time p is returned
	void *arg;
	MLink *list;
	byte *chunk;
	uint32 nchunk;
	uintptr inuse;	// in-use bytes now
	uintptr sys;	// bytes obtained from system
};
void	runtime_FixAlloc_Init(FixAlloc *f, uintptr size, void *(*alloc)(uintptr), void (*first)(void*, byte*), void *arg);
void*	runtime_FixAlloc_Alloc(FixAlloc *f);
void	runtime_FixAlloc_Free(FixAlloc *f, void *p);
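
// Illustrative setup, mirroring the heap's spanalloc below; the actual
// call sites live in the allocator, not in this header:
//
//	FixAlloc fix;
//	runtime_FixAlloc_Init(&fix, sizeof(MSpan), runtime_SysAlloc, nil, nil);
//	MSpan *s = runtime_FixAlloc_Alloc(&fix);
//	runtime_FixAlloc_Free(&fix, s);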
// Statistics.
// Shared with Go: if you edit this structure, also edit extern.go.
struct MStats
{
	// General statistics. No locking; approximate.
	uint64	alloc;		// bytes allocated and still in use
	uint64	total_alloc;	// bytes allocated (even if freed)
	uint64	sys;		// bytes obtained from system (should be sum of xxx_sys below)
	uint64	nlookup;	// number of pointer lookups
	uint64	nmalloc;	// number of mallocs

	// Statistics about malloc heap.
	// protected by mheap.Lock
	uint64	heap_alloc;	// bytes allocated and still in use
	uint64	heap_sys;	// bytes obtained from system
	uint64	heap_idle;	// bytes in idle spans
	uint64	heap_inuse;	// bytes in non-idle spans
	uint64	heap_objects;	// total number of allocated objects

	// Statistics about allocation of low-level fixed-size structures.
	// Protected by FixAlloc locks.
	uint64	stacks_inuse;	// bootstrap stacks
	uint64	stacks_sys;
	uint64	mspan_inuse;	// MSpan structures
	uint64	mspan_sys;
	uint64	mcache_inuse;	// MCache structures
	uint64	mcache_sys;
	uint64	heapmap_sys;	// heap map
	uint64	buckhash_sys;	// profiling bucket hash table

	// Statistics about garbage collector.
	// Protected by stopping the world during GC.
	uint64	next_gc;	// next GC (in heap_alloc time)
	uint64	pause_ns;
	uint32	numgc;
	bool	enablegc;
	bool	debuggc;

	// Statistics about allocation size classes.
	// No locking; approximate.
	struct {
		uint32 size;
		uint64 nmalloc;
		uint64 nfree;
	} by_size[NumSizeClasses];
};
extern MStats mstats
  __asm__ ("libgo_runtime.runtime.MemStats");

// Size classes. Computed and initialized by InitSizes.
//
// SizeToClass(0 <= n <= MaxSmallSize) returns the size class,
//	1 <= sizeclass < NumSizeClasses, for n.
//	Size class 0 is reserved to mean "not small".
//
// class_to_size[i] = largest size in class i
// class_to_allocnpages[i] = number of pages to allocate when
//	making new objects in class i
// class_to_transfercount[i] = number of objects to move when
//	taking a bunch of objects out of the central lists
//	and putting them in the thread free list.

int32	runtime_SizeToClass(int32);
extern	int32	runtime_class_to_size[NumSizeClasses];
extern	int32	runtime_class_to_allocnpages[NumSizeClasses];
extern	int32	runtime_class_to_transfercount[NumSizeClasses];
extern	void	runtime_InitSizes(void);
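
// Illustrative use of the declarations above for a small request n:
//
//	int32 sizeclass = runtime_SizeToClass(n);
//	int32 size = runtime_class_to_size[sizeclass];	// n rounded up
//	int32 nmove = runtime_class_to_transfercount[sizeclass];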
// Per-thread (in Go, per-M) cache for small objects.
// No locking needed because it is per-thread (per-M).
typedef struct MCacheList MCacheList;
struct MCacheList
{
	MLink *list;
	uint32 nlist;
	uint32 nlistmin;
};

struct MCache
{
	MCacheList list[NumSizeClasses];
	uint64 size;
	int64 local_alloc;	// bytes allocated (or freed) since last lock of heap
	int64 local_objects;	// objects allocated (or freed) since last lock of heap
	int32 next_sample;	// trigger heap sample after allocating this many bytes
};

void*	runtime_MCache_Alloc(MCache *c, int32 sizeclass, uintptr size, int32 zeroed);
void	runtime_MCache_Free(MCache *c, void *p, int32 sizeclass, uintptr size);
void	runtime_MCache_ReleaseAll(MCache *c);
// An MSpan is a run of pages.
enum
{
	MSpanInUse = 0,
	MSpanFree,
	MSpanListHead,
	MSpanDead,
};
struct MSpan
{
	MSpan	*next;		// in a span linked list
	MSpan	*prev;		// in a span linked list
	MSpan	*allnext;	// in the list of all spans
	PageID	start;		// starting page number
	uintptr	npages;		// number of pages in span
	MLink	*freelist;	// list of free objects
	uint32	ref;		// number of allocated objects in this span
	uint32	sizeclass;	// size class
	uint32	state;		// MSpanInUse etc
	union {
		uint32	*gcref;	// sizeclass > 0
		uint32	gcref0;	// sizeclass == 0
	};
};

void	runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages);

// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
// MCentral's span lists. We use empty MSpan structures as list heads.
void	runtime_MSpanList_Init(MSpan *list);
bool	runtime_MSpanList_IsEmpty(MSpan *list);
void	runtime_MSpanList_Insert(MSpan *list, MSpan *span);
void	runtime_MSpanList_Remove(MSpan *span);	// from whatever list it is in
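
// Illustrative use (an empty MSpan serves as a list head):
//
//	MSpan head;
//	runtime_MSpanList_Init(&head);
//	runtime_MSpanList_Insert(&head, span);
//	if(!runtime_MSpanList_IsEmpty(&head))
//		runtime_MSpanList_Remove(span);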
// Central list of free objects of a given size.
struct MCentral
{
	Lock;
	int32 sizeclass;
	MSpan nonempty;
	MSpan empty;
	int32 nfree;
};

void	runtime_MCentral_Init(MCentral *c, int32 sizeclass);
int32	runtime_MCentral_AllocList(MCentral *c, int32 n, MLink **first);
void	runtime_MCentral_FreeList(MCentral *c, int32 n, MLink *first);

// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
// but all the other global data is here too.
struct MHeap
{
	Lock;
	MSpan free[MaxMHeapList];	// free lists of given length
	MSpan large;			// free lists length >= MaxMHeapList
	MSpan *allspans;

	// span lookup
	MHeapMap map;

	// range of addresses we might see in the heap
	byte *min;
	byte *max;

	// range of addresses we might see in a Native Client closure
	byte *closure_min;
	byte *closure_max;

	// central free lists for small size classes.
	// the union makes sure that the MCentrals are
	// spaced 64 bytes apart, so that each MCentral.Lock
	// gets its own cache line.
	union {
		MCentral;
		byte pad[64];
	} central[NumSizeClasses];

	FixAlloc spanalloc;	// allocator for MSpan*
	FixAlloc cachealloc;	// allocator for MCache*
};
extern MHeap runtime_mheap;

void	runtime_MHeap_Init(MHeap *h, void *(*allocator)(uintptr));
MSpan*	runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, int32 acct);
void	runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct);
MSpan*	runtime_MHeap_Lookup(MHeap *h, PageID p);
MSpan*	runtime_MHeap_LookupMaybe(MHeap *h, PageID p);
void	runtime_MGetSizeClassInfo(int32 sizeclass, int32 *size, int32 *npages, int32 *nobj);
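
// Illustrative lookup from an address back to its span, using the
// declarations above:
//
//	PageID p = (uintptr)v >> PageShift;
//	MSpan *s = runtime_MHeap_Lookup(&runtime_mheap, p);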
void*	runtime_mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed);
int32	runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **s, uint32 **ref);
void	runtime_gc(int32 force);

void*	runtime_SysAlloc(uintptr);
void	runtime_SysUnused(void*, uintptr);
void	runtime_SysFree(void*, uintptr);

enum
{
	RefcountOverhead = 4,	// one uint32 per object

	RefFree = 0,		// must be zero
	RefStack,		// stack segment - don't free and don't scan for pointers
	RefNone,		// no references
	RefSome,		// some references
	RefNoPointers = 0x80000000U,	// flag - no pointers here
	RefHasFinalizer = 0x40000000U,	// flag - has finalizer
	RefProfiled = 0x20000000U,	// flag - is in profiling table
	RefNoProfiling = 0x10000000U,	// flag - must not profile
	RefFlags = 0xFFFF0000U,
};
void	runtime_MProf_Malloc(void*, uintptr);
void	runtime_MProf_Free(void*, uintptr);
void	runtime_MProf_Mark(void (*scan)(byte *, int64));

// Malloc profiling settings.
// Must match definition in extern.go.
enum {
	MProf_None = 0,
	MProf_Sample = 1,
	MProf_All = 2,
};
extern int32 runtime_malloc_profile;
typedef struct Finalizer Finalizer;
struct Finalizer
{
	Finalizer *next;	// for use by caller of getfinalizer
	void (*fn)(void*);
	void *arg;
	const struct __go_func_type *ft;
};

Finalizer*	runtime_getfinalizer(void*, bool);