// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This was originally based on tcmalloc, but has diverged quite a bit.
// http://goog-perftools.sourceforge.net/doc/tcmalloc.html

// The main allocator works in runs of pages.
// Small allocation sizes (up to and including 32 kB) are
// rounded to one of about 70 size classes, each of which
// has its own free set of objects of exactly that size.
// Any free page of memory can be split into a set of objects
// of one size class, which are then managed using a free bitmap.
//
// The allocator's data structures are:
//
//	fixalloc: a free-list allocator for fixed-size off-heap objects,
//		used to manage storage used by the allocator.
//	mheap: the malloc heap, managed at page (8192-byte) granularity.
//	mspan: a run of pages managed by the mheap.
//	mcentral: collects all spans of a given size class.
//	mcache: a per-P cache of mspans with free space.
//	mstats: allocation statistics.
//
// Allocating a small object proceeds up a hierarchy of caches (see the
// sketch after this list):
//
//	1. Round the size up to one of the small size classes
//	   and look in the corresponding mspan in this P's mcache.
//	   Scan the mspan's free bitmap to find a free slot.
//	   If there is a free slot, allocate it.
//	   This can all be done without acquiring a lock.
//
//	2. If the mspan has no free slots, obtain a new mspan
//	   from the mcentral's list of mspans of the required size
//	   class that have free space.
//	   Obtaining a whole span amortizes the cost of locking
//	   the mcentral.
//
//	3. If the mcentral's mspan list is empty, obtain a run
//	   of pages from the mheap to use for the mspan.
//
//	4. If the mheap is empty or has no page runs large enough,
//	   allocate a new group of pages (at least 1MB) from the
//	   operating system. Allocating a large run of pages
//	   amortizes the cost of talking to the operating system.
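//
// As a rough illustration (not runtime code), steps 1-4 can be sketched
// like this, where allocFromSpan is a hypothetical stand-in for the
// free-bitmap scan and refill hides the mcentral/mheap/OS traversal:
//
//	func allocSmall(c *mcache, sizeclass uint8) gclinkptr {
//		if v := allocFromSpan(c.alloc[sizeclass]); v != 0 {
//			return v // step 1: mcache hit, no lock needed
//		}
//		c.refill(int32(sizeclass)) // steps 2-4
//		return allocFromSpan(c.alloc[sizeclass])
//	}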
//
// Sweeping an mspan and freeing objects on it proceeds up a similar
// hierarchy:
//
//	1. If the mspan is being swept in response to allocation, it
//	   is returned to the mcache to satisfy the allocation.
//
//	2. Otherwise, if the mspan still has allocated objects in it,
//	   it is placed on the mcentral free list for the mspan's size
//	   class.
//
//	3. Otherwise, if all objects in the mspan are free, the mspan
//	   is now "idle", so it is returned to the mheap and no longer
//	   has a size class.
//	   This may coalesce it with adjacent idle mspans.
//
//	4. If an mspan remains idle for long enough, return its pages
//	   to the operating system.
//
// Allocating and freeing a large object uses the mheap
// directly, bypassing the mcache and mcentral.
//
// Free object slots in an mspan are zeroed only if mspan.needzero is
// false. If needzero is true, objects are zeroed as they are
// allocated. There are various benefits to delaying zeroing this way:
//
//	1. Stack frame allocation can avoid zeroing altogether.
//
//	2. It exhibits better temporal locality, since the program is
//	   probably about to write to the memory.
//
//	3. We don't zero pages that never get reused.
package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)
// C function to get the end of the program's memory.
func getEnd() uintptr

// For gccgo, use go:linkname to rename compiler-called functions to
// themselves, so that the compiler will export them.
//
//go:linkname newobject runtime.newobject

// Functions called by C code.
//go:linkname mallocgc runtime.mallocgc
const (
	maxTinySize   = _TinySize
	tinySizeClass = _TinySizeClass
	maxSmallSize  = _MaxSmallSize

	pageShift = _PageShift
	pageSize  = _PageSize
	pageMask  = _PageMask

	// By construction, single page spans of the smallest object class
	// have the most objects per span.
	maxObjsPerSpan = pageSize / 8

	mSpanInUse = _MSpanInUse

	concurrentSweep = _ConcurrentSweep

	_PageSize = 1 << _PageShift
	_PageMask = _PageSize - 1

	// _64bit = 1 on 64-bit systems, 0 on 32-bit systems
	_64bit = 1 << (^uintptr(0) >> 63) / 2
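
	// For example, on a 64-bit system ^uintptr(0)>>63 is 1, so
	// _64bit = 1<<1/2 = 2/2 = 1; on a 32-bit system the shift yields 0,
	// so _64bit = 1<<0/2 = 1/2 = 0 (integer division).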
	// Tiny allocator parameters, see "Tiny allocator" comment in malloc.go.
	_TinySize      = 16
	_TinySizeClass = 2

	_FixAllocChunk  = 16 << 10               // Chunk size for FixAlloc
	_MaxMHeapList   = 1 << (20 - _PageShift) // Maximum page length for fixed-size list in MHeap.
	_HeapAllocChunk = 1 << 20                // Chunk size for heap growth

	// Per-P, per order stack segment cache size.
	_StackCacheSize = 32 * 1024

	// Number of orders that get caching. Order 0 is FixedStack
	// and each successive order is twice as large.
	// We want to cache 2KB, 4KB, 8KB, and 16KB stacks. Larger stacks
	// will be allocated directly.
	// Since FixedStack is different on different systems, we
	// must vary NumStackOrders to keep the same maximum cached size.
	//   OS               | FixedStack | NumStackOrders
	//   -----------------+------------+---------------
	//   linux/darwin/bsd | 2KB        | 4
	//   windows/32       | 4KB        | 3
	//   windows/64       | 8KB        | 2
	//   plan9            | 4KB        | 3
	_NumStackOrders = 4 - sys.PtrSize/4*sys.GoosWindows - 1*sys.GoosPlan9
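
	// Checking the formula against the table above: on linux/darwin/bsd
	// both GoosWindows and GoosPlan9 are 0, giving 4 orders; on
	// windows/32 (PtrSize = 4) it is 4 - 4/4*1 = 3; on windows/64
	// (PtrSize = 8) it is 4 - 8/4*1 = 2; on plan9 it is 4 - 0 - 1 = 3.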
	// Number of bits in page to span calculations (4k pages).
	// On Windows 64-bit we limit the arena to 32GB or 35 bits.
	// Windows counts memory used by page table into committed memory
	// of the process, so we can't reserve too much memory.
	// See https://golang.org/issue/5402 and https://golang.org/issue/5236.
	// On other 64-bit platforms, we limit the arena to 512GB, or 39 bits.
	// On 32-bit, we don't bother limiting anything, so we use the full 32-bit address.
	// The only exception is mips32 which only has access to low 2GB of virtual memory.
	// On Darwin/arm64, we cannot reserve more than ~5GB of virtual memory,
	// but as most devices have less than 4GB of physical memory anyway, we
	// try to be conservative here, and only ask for a 2GB heap.
	_MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*(32-(sys.GoarchMips+sys.GoarchMipsle))
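
	// Evaluating the expression above for a few targets: windows/amd64
	// gives 1*1*35 = 35 bits (32GB); linux/amd64 gives 1*1*1*39 = 39
	// bits (512GB); darwin/arm64 gives 31 bits (2GB); linux/386 gives
	// (1-0)*(32-0) = 32 bits; and linux/mips gives 32-1 = 31 bits.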
	_MHeapMap_Bits = _MHeapMap_TotalBits - _PageShift

	_MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)
	// Max number of threads to run garbage collection.
	// 2, 3, and 4 are all plausible maximums depending
	// on the hardware details of the machine. The garbage
	// collector scales well to 32 cpus.
	_MaxGcproc = 32

	_MaxArena32 = 1<<32 - 1

	// minLegalPointer is the smallest possible legal pointer.
	// This is the smallest possible architectural page size,
	// since we assume that the first page is never mapped.
	//
	// This should agree with minZeroPage in the compiler.
	minLegalPointer uintptr = 4096
)
// physPageSize is the size in bytes of the OS's physical pages.
// Mapping and unmapping operations must be done at multiples of
// physPageSize.
//
// This must be set by the OS init code (typically in osinit) before
// mallocinit.
var physPageSize uintptr
// OS-defined helpers:
//
// sysAlloc obtains a large chunk of zeroed memory from the
// operating system, typically on the order of a hundred kilobytes
// or a megabyte.
//
// NOTE: sysAlloc returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysAlloc.
//
// SysUnused notifies the operating system that the contents
// of the memory region are no longer needed and can be reused
// for other purposes.
// SysUsed notifies the operating system that the contents
// of the memory region are needed again.
//
// SysFree returns it unconditionally; this is only used if
// an out-of-memory error has been detected midway through
// an allocation. It is okay if SysFree is a no-op.
//
// SysReserve reserves address space without allocating memory.
// If the pointer passed to it is non-nil, the caller wants the
// reservation there, but SysReserve can still choose another
// location if that one is unavailable. On some systems and in some
// cases SysReserve will simply check that the address space is
// available and not actually reserve it. If SysReserve returns
// non-nil, it sets *reserved to true if the address space is
// reserved, false if it has merely been checked.
// NOTE: SysReserve returns OS-aligned memory, but the heap allocator
// may use larger alignment, so the caller must be careful to realign the
// memory obtained by sysAlloc.
//
// SysMap maps previously reserved address space for use.
// The reserved argument is true if the address space was really
// reserved, not merely checked.
//
// SysFault marks a (already sysAlloc'd) region to fault
// if accessed. Used only for debugging the runtime.
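//
// As an illustration (a sketch, not runtime code; the sysUnused and
// sysUsed signatures are assumed here, and stat is some memstats
// counter), the usual lifetime of a region under these helpers is:
//
//	var reserved bool
//	v := sysReserve(nil, n, &reserved) // claim address space only
//	sysMap(v, n, reserved, &stat)      // commit the space for use
//	sysUnused(v, n)                    // advise OS: contents disposable
//	sysUsed(v, n)                      // advise OS: contents needed again
//	sysFree(v, n, &stat)               // return it unconditionally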
func mallocinit() {
	if class_to_size[_TinySizeClass] != _TinySize {
		throw("bad TinySizeClass")
	}

	// Not used for gccgo.

	// Copy class sizes out for statistics table.
	for i := range class_to_size {
		memstats.by_size[i].size = uint32(class_to_size[i])
	}

	// Check physPageSize.
	if physPageSize == 0 {
		// The OS init code failed to fetch the physical page size.
		throw("failed to get system page size")
	}
	if physPageSize < minPhysPageSize {
		print("system page size (", physPageSize, ") is smaller than minimum page size (", minPhysPageSize, ")\n")
		throw("bad system page size")
	}
	if physPageSize&(physPageSize-1) != 0 {
		print("system page size (", physPageSize, ") must be a power of 2\n")
		throw("bad system page size")
	}
	var p, bitmapSize, spansSize, pSize, limit uintptr
	var reserved bool

	// limit = runtime.memlimit();
	// See https://golang.org/issue/5049
	// TODO(rsc): Fix after 1.1.
	limit = 0
	// Set up the allocation arena, a contiguous area of memory where
	// allocated data will be found. The arena begins with a bitmap large
	// enough to hold 2 bits per allocated word.
	if sys.PtrSize == 8 && (limit == 0 || limit > 1<<30) {
		// On a 64-bit machine, allocate from a single contiguous reservation.
		// 512 GB (MaxMem) should be big enough for now.
		//
		// The code will work with the reservation at any address, but ask
		// SysReserve to use 0x0000XXc000000000 if possible (XX=00...7f).
		// Allocating a 512 GB region takes away 39 bits, and the amd64
		// doesn't let us choose the top 17 bits, so that leaves the 9 bits
		// in the middle of 0x00c0 for us to choose. Choosing 0x00c0 means
		// that the valid memory addresses will begin 0x00c0, 0x00c1, ..., 0x00df.
		// In little-endian, that's c0 00, c1 00, ..., df 00. None of those are valid
		// UTF-8 sequences, and they are otherwise as far away from
		// ff (likely a common byte) as possible. If that fails, we try other 0xXXc0
		// addresses. An earlier attempt to use 0x11f8 caused out of memory errors
		// on OS X during thread allocations. 0x00c0 causes conflicts with
		// AddressSanitizer which reserves all memory up to 0x0100.
		// These choices are both for debuggability and to reduce the
		// odds of a conservative garbage collector (as is still used in gccgo)
		// not collecting memory because some non-pointer block of memory
		// had a bit pattern that matched a memory address.
		//
		// Actually we reserve 544 GB (because the bitmap ends up being 32 GB)
		// but it hardly matters: e0 00 is not valid UTF-8 either.
		//
		// If this fails we fall back to the 32 bit memory mechanism.
		//
		// However, on arm64, we ignore all this advice above and slam the
		// allocation at 0x40 << 32 because when using 4k pages with 3-level
		// translation buffers, the user address space is limited to 39 bits.
		// On darwin/arm64, the address space is even smaller.
		arenaSize := round(_MaxMem, _PageSize)
		bitmapSize = arenaSize / (sys.PtrSize * 8 / 2)
		spansSize = arenaSize / _PageSize * sys.PtrSize
		spansSize = round(spansSize, _PageSize)
		for i := 0; i <= 0x7f; i++ {
			switch {
			case GOARCH == "arm64" && GOOS == "darwin":
				p = uintptr(i)<<40 | uintptrMask&(0x0013<<28)
			case GOARCH == "arm64":
				p = uintptr(i)<<40 | uintptrMask&(0x0040<<32)
			default:
				p = uintptr(i)<<40 | uintptrMask&(0x00c0<<32)
			}
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
	}
	if p == 0 {
		// On a 32-bit machine, we can't typically get away
		// with a giant virtual address space reservation.
		// Instead we map the memory information bitmap
		// immediately after the data segment, large enough
		// to handle the entire 4GB address space (256 MB),
		// along with a reservation for an initial arena.
		// When that gets used up, we'll start asking the kernel
		// for any memory anywhere.
		//
		// If we fail to allocate, try again with a smaller arena.
		// This is necessary on Android L where we share a process
		// with ART, which reserves virtual memory aggressively.
		// In the worst case, fall back to a 0-sized initial arena,
		// in the hope that subsequent reservations will succeed.
		arenaSizes := [...]uintptr{
			512 << 20,
			256 << 20,
			128 << 20,
			0,
		}

		for _, arenaSize := range &arenaSizes {
			bitmapSize = (_MaxArena32 + 1) / (sys.PtrSize * 8 / 2)
			spansSize = (_MaxArena32 + 1) / _PageSize * sys.PtrSize
			if limit > 0 && arenaSize+bitmapSize+spansSize > limit {
				bitmapSize = (limit / 9) &^ ((1 << _PageShift) - 1)
				arenaSize = bitmapSize * 8
				spansSize = arenaSize / _PageSize * sys.PtrSize
			}
			spansSize = round(spansSize, _PageSize)
			// SysReserve treats the address we ask for, end, as a hint,
			// not as an absolute requirement. If we ask for the end
			// of the data segment but the operating system requires
			// a little more space before we can start allocating, it will
			// give out a slightly higher pointer. Except QEMU, which
			// is buggy, as usual: it won't adjust the pointer upward.
			// So adjust it upward a little bit ourselves: 1/4 MB to get
			// away from the running binary image and then round up
			// to a MB boundary.
			p = round(getEnd()+(1<<18), 1<<20)
			pSize = bitmapSize + spansSize + arenaSize + _PageSize
			p = uintptr(sysReserve(unsafe.Pointer(p), pSize, &reserved))
			if p != 0 {
				break
			}
		}
		if p == 0 {
			throw("runtime: cannot reserve arena virtual address space")
		}
	}
	// PageSize can be larger than OS definition of page size,
	// so SysReserve can give us a PageSize-unaligned pointer.
	// To overcome this we ask for PageSize more and round up the pointer.
	p1 := round(p, _PageSize)

	spansStart := p1
	mheap_.bitmap = p1 + spansSize + bitmapSize
	if sys.PtrSize == 4 {
		// Set arena_start such that we can accept memory
		// reservations located anywhere in the 4GB virtual space.
		mheap_.arena_start = 0
	} else {
		mheap_.arena_start = p1 + (spansSize + bitmapSize)
	}
	mheap_.arena_end = p + pSize
	mheap_.arena_used = p1 + (spansSize + bitmapSize)
	mheap_.arena_reserved = reserved

	if mheap_.arena_start&(_PageSize-1) != 0 {
		println("bad pagesize", hex(p), hex(p1), hex(spansSize), hex(bitmapSize), hex(_PageSize), "start", hex(mheap_.arena_start))
		throw("misrounded allocation in mallocinit")
	}

	// Initialize the rest of the allocator.
	mheap_.init(spansStart, spansSize)
	_g_ := getg()
	_g_.m.mcache = allocmcache()
}
// sysAlloc allocates the next n bytes from the heap arena. The
// returned pointer is always _PageSize aligned and between
// h.arena_start and h.arena_end. sysAlloc returns nil on failure.
// There is no corresponding free function.
func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
	if n > h.arena_end-h.arena_used {
		// We are in 32-bit mode, maybe we didn't use all possible address space yet.
		// Reserve some more space.
		p_size := round(n+_PageSize, 256<<20)
		new_end := h.arena_end + p_size // Careful: can overflow
		if h.arena_end <= new_end && new_end-h.arena_start-1 <= _MaxArena32 {
			// TODO: It would be bad if part of the arena
			// is reserved and part is not.
			var reserved bool
			p := uintptr(sysReserve(unsafe.Pointer(h.arena_end), p_size, &reserved))
			if p == 0 {
				return nil
			}
			// p can be just about anywhere in the address
			// space, including before arena_end.
			if p == h.arena_end {
				h.arena_end = new_end
				h.arena_reserved = reserved
			} else if h.arena_end < p && p+p_size-h.arena_start-1 <= _MaxArena32 {
				// Keep everything page-aligned.
				// Our pages are bigger than hardware pages.
				h.arena_end = p + p_size
				used := p + (-p & (_PageSize - 1))
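				// Illustrative aside: -p & (_PageSize-1) is the usual
				// round-up-to-alignment idiom. With _PageSize = 0x2000
				// and p = 0x12345, -p & 0x1fff = 0x1cbb, so used =
				// 0x14000, the next page boundary at or above p.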
				h.mapBits(used)
				h.mapSpans(used)
				h.arena_used = used
				h.arena_reserved = reserved
			} else {
				// We got a mapping, but it's not
				// linear with our current arena, so
				// we can't use it.
				//
				// TODO: Make it possible to allocate
				// from this. We can't decrease
				// arena_used, but we could introduce
				// a new variable for the current
				// allocation position.
				//
				// We haven't added this allocation to
				// the stats, so subtract it from a
				// fake stat (but avoid underflow).
				stat := uint64(p_size)
				sysFree(unsafe.Pointer(p), p_size, &stat)
			}
		}
	}
	if n <= h.arena_end-h.arena_used {
		// Keep taking from our reservation.
		p := h.arena_used
		sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
		h.mapBits(p + n)
		h.mapSpans(p + n)
		h.arena_used = p + n
		if raceenabled {
			racemapshadow(unsafe.Pointer(p), n)
		}

		if p&(_PageSize-1) != 0 {
			throw("misrounded allocation in MHeap_SysAlloc")
		}
		return unsafe.Pointer(p)
	}
	// If using 64-bit, our reservation is all we have.
	if h.arena_end-h.arena_start > _MaxArena32 {
		return nil
	}

	// On a 32-bit machine, once the reservation is gone we can
	// try to get memory at a location chosen by the OS.
	p_size := round(n, _PageSize) + _PageSize
	p := uintptr(sysAlloc(p_size, &memstats.heap_sys))
	if p == 0 {
		return nil
	}

	if p < h.arena_start || p+p_size-h.arena_start > _MaxArena32 {
		top := ^uintptr(0)
		if top-h.arena_start-1 > _MaxArena32 {
			top = h.arena_start + _MaxArena32 + 1
		}
		print("runtime: memory allocated by OS (", hex(p), ") not in usable range [", hex(h.arena_start), ",", hex(top), ")\n")
		sysFree(unsafe.Pointer(p), p_size, &memstats.heap_sys)
		return nil
	}

	p_end := p + p_size
	p += -p & (_PageSize - 1)
	if p+n > h.arena_used {
		h.mapBits(p + n)
		h.mapSpans(p + n)
		h.arena_used = p + n
		if p_end > h.arena_end {
			h.arena_end = p_end
		}
		if raceenabled {
			racemapshadow(unsafe.Pointer(p), n)
		}
	}

	if p&(_PageSize-1) != 0 {
		throw("misrounded allocation in MHeap_SysAlloc")
	}
	return unsafe.Pointer(p)
}
// base address for all 0-byte allocations
var zerobase uintptr
// nextFreeFast returns the next free object if one is quickly available.
// Otherwise it returns 0.
func nextFreeFast(s *mspan) gclinkptr {
	theBit := sys.Ctz64(s.allocCache) // Is there a free object in the allocCache?
	if theBit < 64 {
		result := s.freeindex + uintptr(theBit)
		if result < s.nelems {
			freeidx := result + 1
			if freeidx%64 == 0 && freeidx != s.nelems {
				return 0
			}
			s.allocCache >>= (theBit + 1)
			s.freeindex = freeidx
			v := gclinkptr(result*s.elemsize + s.base())
			s.allocCount++
			return v
		}
	}
	return 0
}
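
// Illustrative example (not runtime code): if s.freeindex is 128 and
// s.allocCache ends in the bits ...10100, Ctz64 returns 2, so the object
// at index 130 is free. allocCache is then shifted right by theBit+1 = 3
// and freeindex advances to 131, so the next search starts past that slot.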
// nextFree returns the next free object from the cached span if one is available.
// Otherwise it refills the cache with a span with an available object and
// returns that object along with a flag indicating that this was a heavy
// weight allocation. If it is a heavy weight allocation the caller must
// determine whether a new GC cycle needs to be started or if the GC is active
// whether this goroutine needs to assist the GC.
func (c *mcache) nextFree(sizeclass uint8) (v gclinkptr, s *mspan, shouldhelpgc bool) {
	s = c.alloc[sizeclass]
	shouldhelpgc = false
	freeIndex := s.nextFreeIndex()
	if freeIndex == s.nelems {
		// The span is full.
		if uintptr(s.allocCount) != s.nelems {
			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
			throw("s.allocCount != s.nelems && freeIndex == s.nelems")
		}
		systemstack(func() {
			c.refill(int32(sizeclass))
		})
		shouldhelpgc = true
		s = c.alloc[sizeclass]

		freeIndex = s.nextFreeIndex()
	}

	if freeIndex >= s.nelems {
		throw("freeIndex is not valid")
	}

	v = gclinkptr(freeIndex*s.elemsize + s.base())
	s.allocCount++
	if uintptr(s.allocCount) > s.nelems {
		println("s.allocCount=", s.allocCount, "s.nelems=", s.nelems)
		throw("s.allocCount > s.nelems")
	}
	return
}
// Allocate an object of size bytes.
// Small objects are allocated from the per-P cache's free lists.
// Large objects (> 32 kB) are allocated straight from the heap.
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	if gcphase == _GCmarktermination {
		throw("mallocgc called with gcphase == _GCmarktermination")
	}

	if size == 0 {
		return unsafe.Pointer(&zerobase)
	}

	if debug.sbrk != 0 {
		align := uintptr(16)
		if typ != nil {
			align = uintptr(typ.align)
		}
		return persistentalloc(size, align, &memstats.other_sys)
	}

	// When using gccgo, when a cgo or SWIG function has an
	// interface return type and the function returns a
	// non-pointer, memory allocation occurs after syscall.Cgocall
	// but before syscall.CgocallDone. Treat this allocation as a
	// callback.
	incallback := false
	if gomcache() == nil && getg().m.ncgo > 0 {
		exitsyscall(0)
		incallback = true
	}
	// assistG is the G to charge for this allocation, or nil if
	// GC is not currently active.
	var assistG *g
	if gcBlackenEnabled != 0 {
		// Charge the current user G for this allocation.
		assistG = getg()
		if assistG.m.curg != nil {
			assistG = assistG.m.curg
		}
		// Charge the allocation against the G. We'll account
		// for internal fragmentation at the end of mallocgc.
		assistG.gcAssistBytes -= int64(size)

		if assistG.gcAssistBytes < 0 {
			// This G is in debt. Assist the GC to correct
			// this before allocating. This must happen
			// before disabling preemption.
			gcAssistAlloc(assistG)
		}
	}
	// Set mp.mallocing to keep from being preempted by GC.
	mp := acquirem()
	if mp.mallocing != 0 {
		throw("malloc deadlock")
	}
	if mp.gsignal == getg() {
		throw("malloc during signal")
	}
	mp.mallocing = 1

	shouldhelpgc := false
	dataSize := size
	c := gomcache()
	var x unsafe.Pointer
	noscan := typ == nil || typ.kind&kindNoPointers != 0
	if size <= maxSmallSize {
		if noscan && size < maxTinySize {
			// Tiny allocator.
			//
			// Tiny allocator combines several tiny allocation requests
			// into a single memory block. The resulting memory block
			// is freed when all subobjects are unreachable. The subobjects
			// must be noscan (don't have pointers), this ensures that
			// the amount of potentially wasted memory is bounded.
			//
			// Size of the memory block used for combining (maxTinySize) is tunable.
			// Current setting is 16 bytes, which relates to 2x worst case memory
			// wastage (when all but one subobjects are unreachable).
			// 8 bytes would result in no wastage at all, but provides less
			// opportunities for combining.
			// 32 bytes provides more opportunities for combining,
			// but can lead to 4x worst case wastage.
			// The best case winning is 8x regardless of block size.
			//
			// Objects obtained from tiny allocator must not be freed explicitly.
			// So when an object will be freed explicitly, we ensure that
			// its size >= maxTinySize.
			//
			// SetFinalizer has a special case for objects potentially coming
			// from tiny allocator, in such case it allows to set finalizers
			// for an inner byte of a memory block.
			//
			// The main targets of tiny allocator are small strings and
			// standalone escaping variables. On a json benchmark
			// the allocator reduces number of allocations by ~12% and
			// reduces heap size by ~20%.
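			//
			// As a worked example (illustrative, assuming an empty
			// tiny block): a 12-byte noscan allocation sets
			// tinyoffset to 12; a following 4-byte allocation is
			// 4-byte aligned at offset 12, so both share the same
			// 16-byte block and the second allocation touches no
			// new memory.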
			// Align tiny pointer for required (conservative) alignment.
			off := c.tinyoffset
			if size&7 == 0 {
				off = round(off, 8)
			} else if size&3 == 0 {
				off = round(off, 4)
			} else if size&1 == 0 {
				off = round(off, 2)
			}
			if off+size <= maxTinySize && c.tiny != 0 {
				// The object fits into existing tiny block.
				x = unsafe.Pointer(c.tiny + off)
				c.tinyoffset = off + size
				c.local_tinyallocs++
				mp.mallocing = 0
				releasem(mp)
				if incallback {
					entersyscall(0)
				}
				return x
			}
			// Allocate a new maxTinySize block.
			span := c.alloc[tinySizeClass]
			v := nextFreeFast(span)
			if v == 0 {
				v, _, shouldhelpgc = c.nextFree(tinySizeClass)
			}
			x = unsafe.Pointer(v)
			(*[2]uint64)(x)[0] = 0
			(*[2]uint64)(x)[1] = 0
			// See if we need to replace the existing tiny block with the new one
			// based on amount of remaining free space.
			if size < c.tinyoffset || c.tiny == 0 {
				c.tiny = uintptr(x)
				c.tinyoffset = size
			}
			size = maxTinySize
		} else {
			var sizeclass uint8
			if size <= smallSizeMax-8 {
				sizeclass = size_to_class8[(size+smallSizeDiv-1)/smallSizeDiv]
			} else {
				sizeclass = size_to_class128[(size-smallSizeMax+largeSizeDiv-1)/largeSizeDiv]
			}
			size = uintptr(class_to_size[sizeclass])
			span := c.alloc[sizeclass]
			v := nextFreeFast(span)
			if v == 0 {
				v, span, shouldhelpgc = c.nextFree(sizeclass)
			}
			x = unsafe.Pointer(v)
			if needzero && span.needzero != 0 {
				memclrNoHeapPointers(unsafe.Pointer(v), size)
			}
		}
	} else {
		var s *mspan
		shouldhelpgc = true
		systemstack(func() {
			s = largeAlloc(size, needzero)
		})
		s.freeindex = 1
		s.allocCount = 1
		x = unsafe.Pointer(s.base())
		size = s.elemsize
	}
	var scanSize uintptr
	if noscan {
		heapBitsSetTypeNoScan(uintptr(x))
	} else {
		heapBitsSetType(uintptr(x), size, dataSize, typ)
		if dataSize > typ.size {
			// Array allocation. If there are any
			// pointers, GC has to scan to the last
			// element.
			if typ.ptrdata != 0 {
				scanSize = dataSize - typ.size + typ.ptrdata
			}
		} else {
			scanSize = typ.ptrdata
		}
		c.local_scan += scanSize
	}
	// Ensure that the stores above that initialize x to
	// type-safe memory and set the heap bits occur before
	// the caller can make x observable to the garbage
	// collector. Otherwise, on weakly ordered machines,
	// the garbage collector could follow a pointer to x,
	// but see uninitialized memory or stale heap bits.
	publicationBarrier()

	// Allocate black during GC.
	// All slots hold nil so no scanning is needed.
	// This may be racing with GC so do it atomically if there can be
	// a race marking the bit.
	if gcphase != _GCoff {
		gcmarknewobject(uintptr(x), size, scanSize)
	}

	mp.mallocing = 0
	releasem(mp)
	if debug.allocfreetrace != 0 {
		tracealloc(x, size, typ)
	}

	if rate := MemProfileRate; rate > 0 {
		if size < uintptr(rate) && int32(size) < c.next_sample {
			c.next_sample -= int32(size)
		} else {
			mp := acquirem()
			profilealloc(mp, x, size)
			releasem(mp)
		}
	}

	if assistG != nil {
		// Account for internal fragmentation in the assist
		// debt now that we know it.
		assistG.gcAssistBytes -= int64(size - dataSize)
	}

	if shouldhelpgc && gcShouldStart(false) {
		gcStart(gcBackgroundMode, false)
	}

	if incallback {
		entersyscall(0)
	}

	return x
}
func largeAlloc(size uintptr, needzero bool) *mspan {
	// print("largeAlloc size=", size, "\n")

	if size+_PageSize < size {
		throw("out of memory")
	}
	npages := size >> _PageShift
	if size&_PageMask != 0 {
		npages++
	}

	// Deduct credit for this span allocation and sweep if
	// necessary. mHeap_Alloc will also sweep npages, so this only
	// pays the debt down to npage pages.
	deductSweepCredit(npages*_PageSize, npages)

	s := mheap_.alloc(npages, 0, true, needzero)
	if s == nil {
		throw("out of memory")
	}
	s.limit = s.base() + size
	heapBitsForSpan(s.base()).initSpan(s)
	return s
}
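
// For example, with 8kB pages (_PageShift = 13) a 33kB request gives
// npages = 33792>>13 = 4 with a 1kB remainder, so npages is bumped to 5
// and the span covers 40kB; the final 7kB is the span's internal waste.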
// implementation of new builtin
// compiler (both frontend and SSA backend) knows the signature
// of this function
func newobject(typ *_type) unsafe.Pointer {
	return mallocgc(typ.size, typ, true)
}

//go:linkname reflect_unsafe_New reflect.unsafe_New
func reflect_unsafe_New(typ *_type) unsafe.Pointer {
	return newobject(typ)
}
// newarray allocates an array of n elements of type typ.
func newarray(typ *_type, n int) unsafe.Pointer {
	if n < 0 || uintptr(n) > maxSliceCap(typ.size) {
		panic(plainError("runtime: allocation size out of range"))
	}
	return mallocgc(typ.size*uintptr(n), typ, true)
}

//go:linkname reflect_unsafe_NewArray reflect.unsafe_NewArray
func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer {
	return newarray(typ, n)
}
func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
	mp.mcache.next_sample = nextSample()
	mProf_Malloc(x, size)
}
// nextSample returns the next sampling point for heap profiling.
// It produces a random variable with a geometric distribution and
// mean MemProfileRate. This is done by generating a uniformly
// distributed random number and applying the cumulative distribution
// function for an exponential.
func nextSample() int32 {
	if GOOS == "plan9" {
		// Plan 9 doesn't support floating point in note handler.
		if g := getg(); g == g.m.gsignal {
			return nextSampleNoFP()
		}
	}

	period := MemProfileRate

	// make nextSample not overflow. Maximum possible step is
	// -ln(1/(1<<kRandomBitCount)) * period, approximately 20 * period.
	switch {
	case period > 0x7000000:
		period = 0x7000000
	case period == 0:
		return 0
	}

	// Let m be the sample rate,
	// the probability distribution function is m*exp(-mx), so the CDF is
	// p = 1 - exp(-mx), so
	// q = 1 - p == exp(-mx)
	// log_e(q) = -mx
	// -log_e(q)/m = x
	// x = -log_e(q) * period
	// x = log_2(q) * (-log_e(2)) * period    ; Using log_2 for efficiency
	const randomBitCount = 26
	q := fastrand()%(1<<randomBitCount) + 1
	qlog := fastlog2(float64(q)) - randomBitCount
	if qlog > 0 {
		qlog = 0
	}
	const minusLog2 = -0.6931471805599453 // -ln(2)
	return int32(qlog*(minusLog2*float64(period))) + 1
}
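
// As a numeric illustration: with period = 512*1024 and a draw of
// q = 1<<25, qlog = 25 - 26 = -1, so the function returns
// int32(-1 * (-0.6931... * 524288)) + 1 = 363409, i.e. the next sample
// is taken after roughly 355 KiB of allocation.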
// nextSampleNoFP is similar to nextSample, but uses older,
// simpler code to avoid floating point.
func nextSampleNoFP() int32 {
	// Set first allocation sample size.
	rate := MemProfileRate
	if rate > 0x3fffffff { // make 2*rate not overflow
		rate = 0x3fffffff
	}
	if rate != 0 {
		return int32(int(fastrand()) % (2 * rate))
	}
	return 0
}
type persistentAlloc struct {
	base unsafe.Pointer
	off  uintptr
}

var globalAlloc struct {
	mutex
	persistentAlloc
}
// Wrapper around sysAlloc that can allocate small chunks.
// There is no associated free operation.
// Intended for things like function/type/debug-related persistent data.
// If align is 0, uses default align (currently 8).
// The returned memory will be zeroed.
//
// Consider marking persistentalloc'd types go:notinheap.
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	var p unsafe.Pointer
	systemstack(func() {
		p = persistentalloc1(size, align, sysStat)
	})
	return p
}
// Must run on system stack because stack growth can (re)invoke it.
// See issue 9174.
//go:systemstack
func persistentalloc1(size, align uintptr, sysStat *uint64) unsafe.Pointer {
	const (
		chunk    = 256 << 10
		maxBlock = 64 << 10 // VM reservation granularity is 64K on windows
	)

	if size == 0 {
		throw("persistentalloc: size == 0")
	}
	if align != 0 {
		if align&(align-1) != 0 {
			throw("persistentalloc: align is not a power of 2")
		}
		if align > _PageSize {
			throw("persistentalloc: align is too large")
		}
	} else {
		align = 8
	}

	if size >= maxBlock {
		return sysAlloc(size, sysStat)
	}

	mp := acquirem()
	var persistent *persistentAlloc
	if mp != nil && mp.p != 0 {
		persistent = &mp.p.ptr().palloc
	} else {
		lock(&globalAlloc.mutex)
		persistent = &globalAlloc.persistentAlloc
	}
	persistent.off = round(persistent.off, align)
	if persistent.off+size > chunk || persistent.base == nil {
		persistent.base = sysAlloc(chunk, &memstats.other_sys)
		if persistent.base == nil {
			if persistent == &globalAlloc.persistentAlloc {
				unlock(&globalAlloc.mutex)
			}
			throw("runtime: cannot allocate memory")
		}
		persistent.off = 0
	}
	p := add(persistent.base, persistent.off)
	persistent.off += size
	releasem(mp)
	if persistent == &globalAlloc.persistentAlloc {
		unlock(&globalAlloc.mutex)
	}

	if sysStat != &memstats.other_sys {
		mSysStatInc(sysStat, size)
		mSysStatDec(&memstats.other_sys, size)
	}
	return p
}
)