// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Memory allocator, based on tcmalloc.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html
// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes (NumSizeClasses below),
// each of which has its own free list of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using free list
// allocators.
// The allocator's data structures are:
//
//	FixAlloc: a free-list allocator for fixed-size objects,
//		used to manage storage used by the allocator.
//	MHeap: the malloc heap, managed at page (8192-byte) granularity.
//	MSpan: a run of pages managed by the MHeap.
//	MCentral: a shared free list for a given size class.
//	MCache: a per-thread (in Go, per-P) cache for small objects.
//	MStats: allocation statistics.
// Allocating a small object proceeds up a hierarchy of caches
// (see the sketch below):
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding MCache free list.
//	   If the list is not empty, allocate an object from it.
//	   This can all be done without acquiring a lock.
//
//	2. If the MCache free list is empty, replenish it by
//	   taking a bunch of objects from the MCentral free list.
//	   Moving a bunch amortizes the cost of acquiring the MCentral lock.
//
//	3. If the MCentral free list is empty, replenish it by
//	   allocating a run of pages from the MHeap and then
//	   chopping that memory into objects of the given size.
//	   Allocating many objects amortizes the cost of locking
//	   the heap.
//
//	4. If the MHeap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
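//
// The fast path can be sketched roughly as follows. This is an
// illustrative, simplified sketch, not the code in malloc.goc: it
// ignores the tiny allocator, heap sampling, and flag handling, and
// the helper name sketch_alloc_small is hypothetical.
//
//	static void*
//	sketch_alloc_small(MCache *c, uintptr size)
//	{
//		int32 sizeclass;
//		MLink *v;
//		MSpan *s;
//
//		sizeclass = runtime_SizeToClass((int32)size);	// step 1: round up
//		s = c->alloc[sizeclass];
//		v = s->freelist;
//		if(v == nil) {
//			// steps 2/3: refill from the MCentral, which may in
//			// turn grow itself from the MHeap (steps 3/4).
//			s = runtime_MCache_Refill(c, sizeclass);
//			v = s->freelist;
//		}
//		s->freelist = v->next;
//		s->ref++;
//		return v;
//	}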
// Freeing a small object proceeds up the same hierarchy:
//
//	1. Look up the size class for the object and add it to
//	   the MCache free list.
//
//	2. If the MCache free list is too long or the MCache has
//	   too much memory, return some to the MCentral free lists.
//
//	3. If all the objects in a given span have returned to
//	   the MCentral list, return that span to the page heap.
//
//	4. If the heap has too much memory, return some to the
//	   operating system.
//
// TODO(rsc): Step 4 is not implemented.
//
// Allocating and freeing a large object uses the page heap
// directly, bypassing the MCache and MCentral free lists.
// The small objects on the MCache and MCentral free lists
// may or may not be zeroed. They are zeroed if and only if
// the second word of the object is zero. A span in the
// page heap is zeroed unless s->needzero is set. When a span
// is allocated to break into small objects, it is zeroed if needed
// and s->needzero is set. There are two main benefits to delaying the
// zeroing this way:
//
//	1. stack frames allocated from the small object lists
//	   or the page heap can avoid zeroing altogether.
//	2. the cost of zeroing when reusing a small object is
//	   charged to the mutator, not the garbage collector.
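//
// The zeroed-or-not test above can be sketched as follows; an
// illustrative fragment with hypothetical names (v, size), where v is
// a freshly popped free-list object:
//
//	// The first word of a free object is its MLink next pointer, so
//	// the second word is the earliest word that survives from the
//	// object's previous use. Nonzero there means "not zeroed".
//	if(size > sizeof(uintptr) && ((uintptr*)v)[1] != 0)
//		runtime_memclr((byte*)v, size);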
// This C code was written with an eye toward translating to Go
// in the future. Methods have the form Type_Method(Type *t, ...).

typedef struct MCentral	MCentral;
typedef struct MHeap	MHeap;
typedef struct MSpan	MSpan;
typedef struct MStats	MStats;
typedef struct MLink	MLink;
typedef struct MTypes	MTypes;
typedef struct GCStats	GCStats;

enum
{
	PageShift	= 13,
	PageSize	= 1<<PageShift,
	PageMask	= PageSize - 1,
};
typedef	uintptr	PageID;		// address >> PageShift
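
// Converting between addresses and page IDs is simple shifting; a
// short sketch with hypothetical variable names:
//
//	PageID p = (uintptr)v >> PageShift;	// address -> page ID
//	void *base = (void*)(p << PageShift);	// page ID -> page base address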
enum
{
	// Computed constant. The definition of MaxSmallSize and the
	// algorithm in msize.c produce some number of different allocation
	// size classes. NumSizeClasses is that number. It's needed here
	// because there are static arrays of this length; when msize runs its
	// size choosing algorithm it double-checks that NumSizeClasses agrees.
	NumSizeClasses = 67,

	// Tunable constants.
	MaxSmallSize = 32<<10,

	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.goc.
	TinySize = 16,
	TinySizeClass = 2,

	FixAllocChunk = 16<<10,			// Chunk size for FixAlloc
	MaxMHeapList = 1<<(20 - PageShift),	// Maximum page length for fixed-size list in MHeap.
	HeapAllocChunk = 1<<20,			// Chunk size for heap growth

	// Number of bits in page to span calculations (8k pages).
	// On Windows 64-bit we limit the arena to 32GB or 35 bits (see below for reason).
	// On other 64-bit platforms, we limit the arena to 128GB, or 37 bits.
	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
#if __SIZEOF_POINTER__ == 8
#ifdef GOOS_windows
	// Windows counts memory used by page table into committed memory
	// of the process, so we can't reserve too much memory.
	// See http://golang.org/issue/5402 and http://golang.org/issue/5236.
	MHeapMap_Bits = 35 - PageShift,
#else
	MHeapMap_Bits = 37 - PageShift,
#endif
#else
	MHeapMap_Bits = 32 - PageShift,
#endif

	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 8 cpus.
	MaxGcproc = 8,
};
// Maximum memory allocation size, a hint for callers.
// This must be a #define instead of an enum because it
// is so large.
#if __SIZEOF_POINTER__ == 8
#define	MaxMem	(1ULL<<(MHeapMap_Bits+PageShift))	/* 128 GB or 32 GB */
#else
#define	MaxMem	((uintptr)-1)
#endif

// A generic linked list of blocks. (Typically the block is bigger than sizeof(MLink).)
struct MLink
{
	MLink *next;
};
// SysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
// NOTE: SysAlloc returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by SysAlloc.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes.
// SysUsed notifies the operating system that the contents
// of the memory region are needed again.
//
// SysFree returns the memory to the operating system unconditionally;
// this is only used if an out-of-memory error has been detected
// midway through an allocation. It is okay if SysFree is a no-op.
//
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
// location if that one is unavailable. On some systems and in some
// cases SysReserve will simply check that the address space is
// available and not actually reserve it. If SysReserve returns
// non-nil, it sets *reserved to true if the address space is
// reserved, false if it has merely been checked.
// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by SysReserve.
//
// SysMap maps previously reserved address space for use.
// The reserved argument is true if the address space was really
// reserved, not merely checked.
//
// SysFault marks an (already SysAlloc'd) region to fault
// if accessed. Used only for debugging the runtime.
void*	runtime_SysAlloc(uintptr nbytes, uint64 *stat);
void	runtime_SysFree(void *v, uintptr nbytes, uint64 *stat);
void	runtime_SysUnused(void *v, uintptr nbytes);
void	runtime_SysUsed(void *v, uintptr nbytes);
void	runtime_SysMap(void *v, uintptr nbytes, bool reserved, uint64 *stat);
void*	runtime_SysReserve(void *v, uintptr nbytes, bool *reserved);
void	runtime_SysFault(void *v, uintptr nbytes);
// FixAlloc is a simple free-list allocator for fixed size objects.
// Malloc uses a FixAlloc wrapped around SysAlloc to manage its
// MCache and MSpan objects.
//
// Memory returned by FixAlloc_Alloc is not zeroed.
// The caller is responsible for locking around FixAlloc calls.
// Callers can keep state in the object but the first word is
// smashed by freeing and reallocating.
struct FixAlloc
{
	uintptr	size;
	void	(*first)(void *arg, byte *p);	// called first time p is returned
	void*	arg;
	MLink*	list;
	byte*	chunk;
	uint32	nchunk;
	uintptr	inuse;	// in-use bytes now
	uint64*	stat;
};

void	runtime_FixAlloc_Init(FixAlloc *f, uintptr size, void (*first)(void*, byte*), void *arg, uint64 *stat);
void*	runtime_FixAlloc_Alloc(FixAlloc *f);
void	runtime_FixAlloc_Free(FixAlloc *f, void *p);
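
// Typical use, sketched after the way the heap wires up its span
// allocator; the nil first/arg and the calling context here are
// illustrative only:
//
//	FixAlloc spanalloc;
//	runtime_FixAlloc_Init(&spanalloc, sizeof(MSpan), nil, nil,
//		&mstats.mspan_sys);
//	// ... with the owning lock held:
//	MSpan *s = runtime_FixAlloc_Alloc(&spanalloc);	// note: not zeroed
//	// ... later, with the same lock held:
//	runtime_FixAlloc_Free(&spanalloc, s);	// smashes the first word of *s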
// Statistics.
// Shared with Go: if you edit this structure, also edit type MemStats in mem.go.
struct MStats
{
	// General statistics.
	uint64	alloc;		// bytes allocated and still in use
	uint64	total_alloc;	// bytes allocated (even if freed)
	uint64	sys;		// bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
	uint64	nlookup;	// number of pointer lookups
	uint64	nmalloc;	// number of mallocs
	uint64	nfree;		// number of frees

	// Statistics about malloc heap.
	// Protected by mheap.Lock.
	uint64	heap_alloc;	// bytes allocated and still in use
	uint64	heap_sys;	// bytes obtained from system
	uint64	heap_idle;	// bytes in idle spans
	uint64	heap_inuse;	// bytes in non-idle spans
	uint64	heap_released;	// bytes released to the OS
	uint64	heap_objects;	// total number of allocated objects

	// Statistics about allocation of low-level fixed-size structures.
	// Protected by FixAlloc locks.
	uint64	stacks_inuse;	// bootstrap stacks
	uint64	stacks_sys;
	uint64	mspan_inuse;	// MSpan structures
	uint64	mspan_sys;
	uint64	mcache_inuse;	// MCache structures
	uint64	mcache_sys;
	uint64	buckhash_sys;	// profiling bucket hash table
	uint64	gc_sys;
	uint64	other_sys;

	// Statistics about garbage collector.
	// Protected by mheap or stopping the world during GC.
	uint64	next_gc;	// next GC (in heap_alloc time)
	uint64	last_gc;	// last GC (in absolute time)
	uint64	pause_total_ns;
	uint64	pause_ns[256];
	uint32	numgc;
	bool	enablegc;
	bool	debuggc;

	// Statistics about allocation size classes.
	struct {
		uint32 size;
		uint64 nmalloc;
		uint64 nfree;
	} by_size[NumSizeClasses];
};

extern MStats mstats
	__asm__ (GOSYM_PREFIX "runtime.memStats");
void	runtime_updatememstats(GCStats *stats);
// Size classes. Computed and initialized by InitSizes.
//
// SizeToClass(0 <= n <= MaxSmallSize) returns the size class,
//	1 <= sizeclass < NumSizeClasses, for n.
// Size class 0 is reserved to mean "not small".
//
// class_to_size[i] = largest size in class i
// class_to_allocnpages[i] = number of pages to allocate when
//	making new objects in class i

int32	runtime_SizeToClass(int32);
uintptr	runtime_roundupsize(uintptr);
extern	int32	runtime_class_to_size[NumSizeClasses];
extern	int32	runtime_class_to_allocnpages[NumSizeClasses];
extern	int8	runtime_size_to_class8[1024/8 + 1];
extern	int8	runtime_size_to_class128[(MaxSmallSize-1024)/128 + 1];
extern	void	runtime_InitSizes(void);
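
// The two size_to_class tables split the small-size range so each
// entry stays byte-sized: sizes up to 1024 map in 8-byte steps, larger
// ones in 128-byte steps. A hedged sketch of the lookup SizeToClass
// performs (mirroring the algorithm in msize.c rather than quoting it;
// the helper name is hypothetical):
//
//	static int32
//	sketch_size_to_class(int32 size)
//	{
//		if(size <= 1024)
//			return runtime_size_to_class8[(size+7)>>3];
//		return runtime_size_to_class128[(size-1024+127)>>7];
//	}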
typedef struct MCacheList MCacheList;
struct MCacheList
{
	MLink *list;
	uint32 nlist;
};

// Per-thread (in Go, per-P) cache for small objects.
// No locking needed because it is per-thread (per-P).
struct MCache
{
	// The following members are accessed on every malloc,
	// so they are grouped here for better caching.
	int32	next_sample;		// trigger heap sample after allocating this many bytes
	intptr	local_cachealloc;	// bytes allocated (or freed) from cache since last lock of heap
	// Allocator cache for tiny objects w/o pointers.
	// See "Tiny allocator" comment in malloc.goc.
	byte*	tiny;
	uintptr	tinysize;
	// The rest is not accessed on every malloc.
	MSpan*	alloc[NumSizeClasses];	// spans to allocate from
	MCacheList free[NumSizeClasses];// lists of explicitly freed objects
	// Local allocator stats, flushed during GC.
	uintptr	local_nlookup;		// number of pointer lookups
	uintptr	local_largefree;	// bytes freed for large objects (>MaxSmallSize)
	uintptr	local_nlargefree;	// number of frees for large objects (>MaxSmallSize)
	uintptr	local_nsmallfree[NumSizeClasses];	// number of frees for small objects (<=MaxSmallSize)
};
MSpan*	runtime_MCache_Refill(MCache *c, int32 sizeclass);
void	runtime_MCache_Free(MCache *c, MLink *p, int32 sizeclass, uintptr size);
void	runtime_MCache_ReleaseAll(MCache *c);
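
// How these fit together, as an illustrative sketch (variable names
// hypothetical): Refill is allocation's slow path, Free feeds the
// explicit-free lists, and ReleaseAll flushes everything back to the
// MCentrals, e.g. during GC:
//
//	MSpan *s = c->alloc[sizeclass];
//	if(s->freelist == nil)
//		s = runtime_MCache_Refill(c, sizeclass);	// swap in a span with free objects
//	// ...
//	runtime_MCache_Free(c, p, sizeclass, size);	// p goes onto c->free[sizeclass]
//	// ...
//	runtime_MCache_ReleaseAll(c);	// return cached spans and free lists to MCentral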
// MTypes describes the types of blocks allocated within a span.
// The compression field describes the layout of the data.
//
// MTypes_Empty:
//	All blocks are free, or no type information is available for
//	allocated blocks.
//	The data field has no meaning.
// MTypes_Single:
//	The span contains just one block.
//	The data field holds the type information.
//	The sysalloc field has no meaning.
// MTypes_Words:
//	The span contains multiple blocks.
//	The data field points to an array of type [NumBlocks]uintptr,
//	and each element of the array holds the type of the corresponding
//	block.
// MTypes_Bytes:
//	The span contains at most seven different types of blocks.
//	The data field points to the following structure:
//	struct {
//		type  [8]uintptr // type[0] is always 0
//		index [NumBlocks]byte
//	}
//	The type of the i-th block is: data.type[data.index[i]]
enum
{
	MTypes_Empty = 0,
	MTypes_Single = 1,
	MTypes_Words = 2,
	MTypes_Bytes = 3,
};
struct MTypes
{
	byte	compression;	// one of MTypes_*
	uintptr	data;
};
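
// Decoding the per-block type in the MTypes_Bytes case can be sketched
// as below; an illustrative fragment with hypothetical names (types,
// index, i), following the layout described in the comment above:
//
//	uintptr *types = (uintptr*)m->data;	// the 8-entry type table
//	byte *index = (byte*)(types + 8);	// one index byte per block
//	uintptr ti = types[index[i]];		// type of the i-th block
//
// The fixed eight-entry table is what limits a Bytes-compressed span
// to at most seven distinct types: entry 0 is reserved to mean "no type".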
enum
{
	KindSpecialFinalizer = 1,
	KindSpecialProfile = 2,
	// Note: The finalizer special must be first because if we're freeing
	// an object, a finalizer special will cause the freeing operation
	// to abort, and we want to keep the other special records around
	// if that happens.
};

typedef struct Special Special;
struct Special
{
	Special*	next;	// linked list in span
	uint16		offset;	// span offset of object
	byte		kind;	// kind of Special
};

// The described object has a finalizer set for it.
typedef struct SpecialFinalizer SpecialFinalizer;
struct SpecialFinalizer
{
	Special;
	FuncVal*	fn;
	const FuncType*	ft;
	const PtrType*	ot;
};

// The described object is being heap profiled.
typedef struct Bucket Bucket;	// from mprof.goc
typedef struct SpecialProfile SpecialProfile;
struct SpecialProfile
{
	Special;
	Bucket*	b;
};

// An MSpan is a run of pages.
enum
{
	MSpanInUse = 0,
	MSpanFree,
	MSpanListHead,
	MSpanDead,
};
struct MSpan
{
	MSpan	*next;		// in a span linked list
	MSpan	*prev;		// in a span linked list
	PageID	start;		// starting page number
	uintptr	npages;		// number of pages in span
	MLink	*freelist;	// list of free objects
	// sweep generation:
	// if sweepgen == h->sweepgen - 2, the span needs sweeping
	// if sweepgen == h->sweepgen - 1, the span is currently being swept
	// if sweepgen == h->sweepgen, the span is swept and ready to use
	// h->sweepgen is incremented by 2 after every GC
	uint32	sweepgen;
	uint16	ref;		// capacity - number of objects in freelist
	uint8	sizeclass;	// size class
	bool	incache;	// being used by an MCache
	uint8	state;		// MSpanInUse etc
	uint8	needzero;	// needs to be zeroed before allocation
	uintptr	elemsize;	// computed from sizeclass or from npages
	int64	unusedsince;	// first time spotted by GC in MSpanFree state
	uintptr	npreleased;	// number of pages released to the OS
	byte	*limit;		// end of data in span
	MTypes	types;		// types of allocated objects in this span
	Lock	specialLock;	// guards specials list
	Special	*specials;	// linked list of special records sorted by offset
	MLink	*freebuf;	// objects freed explicitly, not incorporated into freelist yet
};
void	runtime_MSpan_Init(MSpan *span, PageID start, uintptr npages);
void	runtime_MSpan_EnsureSwept(MSpan *span);
bool	runtime_MSpan_Sweep(MSpan *span);
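
// The sweepgen encoding above reduces "does this span still need
// sweeping?" to a pair of comparisons. An illustrative sketch, not the
// runtime's sweep loop itself (which claims the span by CASing
// sweepgen before calling Sweep):
//
//	uint32 sg = runtime_mheap.sweepgen;	// bumped by 2 each GC
//	if(s->sweepgen == sg)
//		;	// already swept this cycle
//	else if(s->sweepgen == sg-1)
//		;	// another thread is sweeping it right now
//	else if(s->sweepgen == sg-2)
//		runtime_MSpan_Sweep(s);	// needs sweeping; claim and sweep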
// Every MSpan is in one doubly-linked list,
// either one of the MHeap's free lists or one of the
// MCentral's span lists. We use empty MSpan structures as list heads.
void	runtime_MSpanList_Init(MSpan *list);
bool	runtime_MSpanList_IsEmpty(MSpan *list);
void	runtime_MSpanList_Insert(MSpan *list, MSpan *span);
void	runtime_MSpanList_InsertBack(MSpan *list, MSpan *span);
void	runtime_MSpanList_Remove(MSpan *span);	// from whatever list it is in

// Central list of free objects of a given size.
struct MCentral
{
	Lock;
	int32	sizeclass;
	MSpan	nonempty;	// list of spans with a free object
	MSpan	empty;		// list of spans with no free objects (or cached in an MCache)
	int32	nfree;		// # of objects available in nonempty spans
};

void	runtime_MCentral_Init(MCentral *c, int32 sizeclass);
MSpan*	runtime_MCentral_CacheSpan(MCentral *c);
void	runtime_MCentral_UncacheSpan(MCentral *c, MSpan *s);
bool	runtime_MCentral_FreeSpan(MCentral *c, MSpan *s, int32 n, MLink *start, MLink *end);
void	runtime_MCentral_FreeList(MCentral *c, MLink *start);	// TODO: need this?
// Main malloc heap.
// The heap itself is the "free[]" and "large" arrays,
// but all the other global data is here too.
struct MHeap
{
	Lock;
	MSpan	free[MaxMHeapList];	// free lists of given length
	MSpan	freelarge;		// free lists length >= MaxMHeapList
	MSpan	busy[MaxMHeapList];	// busy lists of large objects of given length
	MSpan	busylarge;		// busy lists of large objects length >= MaxMHeapList
	MSpan	**allspans;		// all spans out there
	MSpan	**sweepspans;		// copy of allspans referenced by sweeper
	uint32	nspan;
	uint32	nspancap;
	uint32	sweepgen;		// sweep generation, see comment in MSpan
	uint32	sweepdone;		// all spans are swept

	// span lookup
	MSpan**	spans;
	uintptr	spans_mapped;

	// range of addresses we might see in the heap
	byte	*bitmap;
	uintptr	bitmap_mapped;
	byte	*arena_start;
	byte	*arena_used;
	byte	*arena_end;
	bool	arena_reserved;

	// central free lists for small size classes.
	// the padding makes sure that the MCentrals are
	// spaced CacheLineSize bytes apart, so that each MCentral.Lock
	// gets its own cache line.
	struct {
		MCentral;
		byte pad[64];
	} central[NumSizeClasses];

	FixAlloc spanalloc;		// allocator for Span*
	FixAlloc cachealloc;		// allocator for MCache*
	FixAlloc specialfinalizeralloc;	// allocator for SpecialFinalizer*
	FixAlloc specialprofilealloc;	// allocator for SpecialProfile*
	Lock speciallock;		// lock for special record allocators.

	// Malloc stats.
	uint64	largefree;	// bytes freed for large objects (>MaxSmallSize)
	uint64	nlargefree;	// number of frees for large objects (>MaxSmallSize)
	uint64	nsmallfree[NumSizeClasses];	// number of frees for small objects (<=MaxSmallSize)
};
extern MHeap runtime_mheap;

void	runtime_MHeap_Init(MHeap *h);
MSpan*	runtime_MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass, bool large, bool needzero);
void	runtime_MHeap_Free(MHeap *h, MSpan *s, int32 acct);
MSpan*	runtime_MHeap_Lookup(MHeap *h, void *v);
MSpan*	runtime_MHeap_LookupMaybe(MHeap *h, void *v);
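
// Lookup maps an arbitrary heap address back to its span via the
// h->spans array, which has one entry per heap page. A hedged sketch
// of the core of that mapping (the real code also validates that v
// lies between arena_start and arena_used):
//
//	uintptr page = (uintptr)v >> PageShift;
//	uintptr base = (uintptr)h->arena_start >> PageShift;
//	MSpan *s = h->spans[page - base];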
void	runtime_MGetSizeClassInfo(int32 sizeclass, uintptr *size, int32 *npages, int32 *nobj);
void*	runtime_MHeap_SysAlloc(MHeap *h, uintptr n);
void	runtime_MHeap_MapBits(MHeap *h);
void	runtime_MHeap_MapSpans(MHeap *h);
void	runtime_MHeap_Scavenger(void*);
void	runtime_MHeap_SplitSpan(MHeap *h, MSpan *s);
void*	runtime_mallocgc(uintptr size, uintptr typ, uint32 flag);
void*	runtime_persistentalloc(uintptr size, uintptr align, uint64 *stat);
int32	runtime_mlookup(void *v, byte **base, uintptr *size, MSpan **s);
void	runtime_gc(int32 force);
uintptr	runtime_sweepone(void);
void	runtime_markscan(void *v);
void	runtime_marknogc(void *v);
void	runtime_checkallocated(void *v, uintptr n);
void	runtime_markfreed(void *v);
void	runtime_checkfreed(void *v, uintptr n);
extern	int32	runtime_checking;
void	runtime_markspan(void *v, uintptr size, uintptr n, bool leftover);
void	runtime_unmarkspan(void *v, uintptr size);
void	runtime_purgecachedstats(MCache*);
void*	runtime_cnew(const Type*);
void*	runtime_cnewarray(const Type*, intgo);
void	runtime_tracealloc(void*, uintptr, uintptr);
void	runtime_tracefree(void*, uintptr);
void	runtime_tracegc(void);

uintptr	runtime_gettype(void*);
enum
{
	// flags to malloc
	FlagNoScan	= 1<<0,	// GC doesn't have to scan object
	FlagNoProfiling	= 1<<1,	// must not profile
	FlagNoGC	= 1<<2,	// must not free or scan for pointers
	FlagNoZero	= 1<<3,	// don't zero memory
	FlagNoInvokeGC	= 1<<4,	// don't invoke GC
};
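
// The flags combine with bitwise or. For example (an illustrative
// call, not taken from this tree), allocating a pointer-free buffer
// the caller will fully overwrite could pass
//
//	p = runtime_mallocgc(n, 0, FlagNoScan|FlagNoZero);
//
// so the collector skips scanning it and the allocator skips zeroing it.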
typedef struct Obj Obj;
struct Obj
{
	byte	*p;	// data pointer
	uintptr	n;	// size of data in bytes
	uintptr	ti;	// type info
};

void	runtime_MProf_Malloc(void*, uintptr);
void	runtime_MProf_Free(Bucket*, uintptr, bool);
void	runtime_MProf_GC(void);
void	runtime_iterate_memprof(void (*callback)(Bucket*, uintptr, Location*, uintptr, uintptr, uintptr));
int32	runtime_gcprocs(void);
void	runtime_helpgc(int32 nproc);
void	runtime_gchelper(void);
void	runtime_createfing(void);
G*	runtime_wakefing(void);
extern bool	runtime_fingwait;
extern bool	runtime_fingwake;

void	runtime_setprofilebucket(void *p, Bucket *b);

struct __go_func_type;
struct __go_ptr_type;
bool	runtime_addfinalizer(void *p, FuncVal *fn, const struct __go_func_type*, const struct __go_ptr_type*);
void	runtime_removefinalizer(void*);
void	runtime_queuefinalizer(void *p, FuncVal *fn, const struct __go_func_type *ft, const struct __go_ptr_type *ot);

void	runtime_freeallspecials(MSpan *span, void *p, uintptr size);
bool	runtime_freespecial(Special *s, void *p, uintptr size, bool freed);
enum
{
	TypeInfo_SingleObject = 0,
	TypeInfo_Array = 1,
	TypeInfo_Chan = 2,

	// Enables type information at the end of blocks allocated from heap
	DebugTypeAtBlockEnd = 0,
};

// Information from the compiler about the layout of stack frames.
typedef struct BitVector BitVector;
struct BitVector
{
	int32	n;	// # of bits
	uint32	*data;
};
typedef struct StackMap StackMap;
struct StackMap
{
	int32	n;	// number of bitmaps
	int32	nbit;	// number of bits in each bitmap
	uint32	data[];
};
enum {
	// Pointer map
	BitsPerPointer = 2,
	BitsDead = 0,
	BitsScalar = 1,
	BitsPointer = 2,
	BitsMultiWord = 3,
	// BitsMultiWord will be set for the first word of a multi-word item.
	// When it is set, one of the following will be set for the second word.
	BitsString = 0,
	BitsSlice = 1,
	BitsIface = 2,
	BitsEface = 3,
};
// Returns pointer map data for the given stackmap index
// (the index is encoded in PCDATA_StackMapIndex).
BitVector	runtime_stackmapdata(StackMap *stackmap, int32 n);
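
// Each word of a frame gets BitsPerPointer (2) bits in the bitmap.
// Extracting the bits for word i can be sketched as follows
// (illustrative only; bv is a BitVector returned by
// runtime_stackmapdata, i is a hypothetical word index):
//
//	uint32 bits = (bv.data[(i*BitsPerPointer)/32] >>
//		((i*BitsPerPointer)%32)) & 3;
//	// bits is BitsDead, BitsScalar, BitsPointer, or BitsMultiWord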
// defined in mgc0.go
void	runtime_gc_m_ptr(Eface*);
void	runtime_gc_g_ptr(Eface*);
void	runtime_gc_itab_ptr(Eface*);

void	runtime_memorydump(void);
int32	runtime_setgcpercent(int32);

// Value we use to mark dead pointers when GODEBUG=gcdead=1.
#define PoisonGC ((uintptr)0xf969696969696969ULL)
#define PoisonStack ((uintptr)0x6868686868686868ULL)

struct Workbuf;
void	runtime_MProf_Mark(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void	runtime_proc_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void	runtime_time_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));
void	runtime_netpoll_scan(struct Workbuf**, void (*)(struct Workbuf**, Obj));