official-gcc.git: libgo/go/runtime/mpagecache.go
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)
const pageCachePages = 8 * unsafe.Sizeof(pageCache{}.cache)

// pageCache represents a per-p cache of pages the allocator can
// allocate from without a lock. More specifically, it represents
// a pageCachePages*pageSize chunk of memory with 0 or more free
// pages in it.
type pageCache struct {
	base  uintptr // base address of the chunk
	cache uint64  // 64-bit bitmap representing free pages (1 means free)
	scav  uint64  // 64-bit bitmap representing scavenged pages (1 means scavenged)
}

// empty returns true if the pageCache has no free pages, and false
// otherwise.
func (c *pageCache) empty() bool {
	return c.cache == 0
}

// alloc allocates npages from the page cache and is the main entry
// point for allocation.
//
// Returns a base address and the amount of scavenged memory in the
// allocated region in bytes.
//
// Returns a base address of zero on failure, in which case the
// amount of scavenged memory should be ignored.
func (c *pageCache) alloc(npages uintptr) (uintptr, uintptr) {
	if c.cache == 0 {
		return 0, 0
	}
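	// Fast path for a single page: the lowest set bit in the cache
	// bitmap is the first free page.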
	if npages == 1 {
		i := uintptr(sys.TrailingZeros64(c.cache))
		scav := (c.scav >> i) & 1
		c.cache &^= 1 << i // set bit to mark in-use
		c.scav &^= 1 << i  // clear bit to mark unscavenged
		return c.base + i*pageSize, uintptr(scav) * pageSize
	}
	return c.allocN(npages)
}

// allocN is a helper which attempts to allocate npages worth of pages
// from the cache. It represents the general case for allocating from
// the page cache.
//
// Returns a base address and the amount of scavenged memory in the
// allocated region in bytes.
func (c *pageCache) allocN(npages uintptr) (uintptr, uintptr) {
	i := findBitRange64(c.cache, uint(npages))
	if i >= 64 {
		return 0, 0
	}
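	// Build a mask of npages consecutive bits starting at bit i,
	// e.g. npages=3, i=5 gives mask = 0b111<<5 = 0b11100000.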
	mask := ((uint64(1) << npages) - 1) << i
	scav := sys.OnesCount64(c.scav & mask)
	c.cache &^= mask // mark in-use bits
	c.scav &^= mask  // clear scavenged bits
	return c.base + uintptr(i*pageSize), uintptr(scav) * pageSize
}

// flush empties out unallocated free pages in the given cache
// into p. Then, it clears the cache, such that empty returns
// true.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (c *pageCache) flush(p *pageAlloc) {
	assertLockHeld(p.mheapLock)

	if c.empty() {
		return
	}
	ci := chunkIndex(c.base)
	pi := chunkPageIndex(c.base)

	// This method is called very infrequently, so just do the
	// slower, safer thing by iterating over each bit individually.
	for i := uint(0); i < 64; i++ {
		if c.cache&(1<<i) != 0 {
			p.chunkOf(ci).free1(pi + i)
		}
		if c.scav&(1<<i) != 0 {
			p.chunkOf(ci).scavenged.setRange(pi+i, 1)
		}
	}
	// Since this is a lot like a free, we need to make sure
	// we update the searchAddr just like free does.
	if b := (offAddr{c.base}); b.lessThan(p.searchAddr) {
		p.searchAddr = b
	}
	p.update(c.base, pageCachePages, false, false)
	*c = pageCache{}
}

// allocToCache acquires a pageCachePages-aligned chunk of free pages which
// may not be contiguous, and returns a pageCache structure which owns the
// chunk.
//
// p.mheapLock must be held.
//
// Must run on the system stack because p.mheapLock must be held.
//
//go:systemstack
func (p *pageAlloc) allocToCache() pageCache {
	assertLockHeld(p.mheapLock)

	// If the searchAddr refers to a region which has a higher address than
	// any known chunk, then we know we're out of memory.
	if chunkIndex(p.searchAddr.addr()) >= p.end {
		return pageCache{}
	}
	c := pageCache{}
	ci := chunkIndex(p.searchAddr.addr()) // chunk index
	var chunk *pallocData
	if p.summary[len(p.summary)-1][ci] != 0 {
		// Fast path: there are free pages at or near the searchAddr address.
		chunk = p.chunkOf(ci)
		j, _ := chunk.find(1, chunkPageIndex(p.searchAddr.addr()))
		if j == ^uint(0) {
			throw("bad summary data")
		}
		c = pageCache{
			base:  chunkBase(ci) + alignDown(uintptr(j), 64)*pageSize,
			cache: ^chunk.pages64(j),
			scav:  chunk.scavenged.block64(j),
		}
	} else {
		// Slow path: the searchAddr address had nothing there, so go find
		// the first free page the slow way.
		addr, _ := p.find(1)
		if addr == 0 {
			// We failed to find adequate free space, so mark the searchAddr as OoM
			// and return an empty pageCache.
			p.searchAddr = maxSearchAddr()
			return pageCache{}
		}
		ci := chunkIndex(addr)
		chunk = p.chunkOf(ci)
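		// Align the base down to a 64-page boundary so the cache's bitmaps
		// line up with a whole 64-bit block of the chunk's bitmaps.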
		c = pageCache{
			base:  alignDown(addr, 64*pageSize),
			cache: ^chunk.pages64(chunkPageIndex(addr)),
			scav:  chunk.scavenged.block64(chunkPageIndex(addr)),
		}
	}

	// Set the page bits as allocated and clear the scavenged bits, but
	// be careful to only set and clear the relevant bits.
	cpi := chunkPageIndex(c.base)
	chunk.allocPages64(cpi, c.cache)
	chunk.scavenged.clearBlock64(cpi, c.cache&c.scav /* free and scavenged */)

	// Update as an allocation, but note that it's not contiguous.
	p.update(c.base, pageCachePages, false, true)

	// Set the search address to the last page represented by the cache.
	// Since all of the pages in this block are going to the cache, and we
	// searched for the first free page, we can confidently start at the
	// next page.
	//
	// However, p.searchAddr is not allowed to point into unmapped heap memory
	// unless it is maxSearchAddr, so make it the last page as opposed to
	// the page after.
	p.searchAddr = offAddr{c.base + pageSize*(pageCachePages-1)}
	return c
}
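
// Worked example (illustrative): for a cache c = pageCache{base: b, cache: 0b1110, scav: 0b0100},
// c.alloc(1) picks page 1 (the lowest set bit) and returns (b+pageSize, 0),
// leaving cache = 0b1100. A following c.alloc(2) goes through allocN, takes
// pages 2 and 3, returns (b+2*pageSize, pageSize) because page 2 was
// scavenged, and leaves the cache empty.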