// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: write barriers.
//
// For the concurrent garbage collector, the Go compiler implements
// updates to pointer-valued fields that may be in heap objects by
// emitting calls to write barriers. The main write barrier for
// individual pointer writes is gcWriteBarrier and is implemented in
// assembly. This file contains write barrier entry points for bulk
// operations. See also mwbbuf.go.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// For gccgo, use go:linkname to export compiler-called functions.
//
//go:linkname typedmemmove
//go:linkname typedslicecopy
//go:linkname memclrHasPointers

// Go uses a hybrid barrier that combines a Yuasa-style deletion
// barrier—which shades the object whose reference is being
// overwritten—with a Dijkstra-style insertion barrier—which shades
// the object whose reference is being written. The insertion part of
// the barrier is necessary while the calling goroutine's stack is
// grey. In pseudocode, the barrier is:
//
//     writePointer(slot, ptr):
//         shade(*slot)
//         if current stack is grey:
//             shade(ptr)
//         *slot = ptr
//
// slot is the destination in Go code.
// ptr is the value that goes into the slot in Go code.
//
// Shade indicates that it has seen a white pointer by adding the referent
// to wbuf as well as marking it.
//
// The two shades and the condition work together to prevent a mutator
// from hiding an object from the garbage collector:
//
// 1. shade(*slot) prevents a mutator from hiding an object by moving
// the sole pointer to it from the heap to its stack. If it attempts
// to unlink an object from the heap, this will shade it.
//
// 2. shade(ptr) prevents a mutator from hiding an object by moving
// the sole pointer to it from its stack into a black object in the
// heap. If it attempts to install the pointer into a black object,
// this will shade it.
//
// 3. Once a goroutine's stack is black, the shade(ptr) becomes
// unnecessary. shade(ptr) prevents hiding an object by moving it from
// the stack to the heap, but this requires first having a pointer
// hidden on the stack. Immediately after a stack is scanned, it only
// points to shaded objects, so it's not hiding anything, and the
// shade(*slot) prevents it from hiding any other pointers on its
// stack.
//
// For a detailed description of this barrier and proof of
// correctness, see https://github.com/golang/proposal/blob/master/design/17503-eliminate-rescan.md
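//
// As a purely illustrative sketch of case 1 above (h, local, and T are
// hypothetical names, not identifiers from this file), a mutator could
// otherwise hide a heap object behind its own stack:
//
//     local := h.p // copy the only pointer to the object onto the stack
//     h.p = nil    // deletion barrier: shade(*slot) shades the old h.p first
//
// Without shade(*slot), the store to h.p could erase the only pointer
// the collector would ever visit after the stack has been scanned, so
// the object could be freed even though local still refers to it.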
//
// Dealing with memory ordering:
//
// Both the Yuasa and Dijkstra barriers can be made conditional on the
// color of the object containing the slot. We chose not to make these
// conditional because the cost of ensuring that the object holding
// the slot doesn't concurrently change color without the mutator
// noticing seems prohibitive.
//
// Consider the following example where the mutator writes into
// a slot and then loads the slot's mark bit while the GC thread
// writes to the slot's mark bit and then as part of scanning reads
// the slot.
//
// Initially both [slot] and [slotmark] are 0 (nil)
// Mutator thread          GC thread
// st [slot], ptr          st [slotmark], 1
//
// ld r1, [slotmark]       ld r2, [slot]
//
// Without an expensive memory barrier between the st and the ld, the final
// result on most HW (including 386/amd64) can be r1==r2==0. This is a classic
// example of what can happen when loads are allowed to be reordered with older
// stores (avoiding such reorderings lies at the heart of the classic
// Peterson/Dekker algorithms for mutual exclusion). Rather than require memory
// barriers, which will slow down both the mutator and the GC, we always grey
// the ptr object regardless of the slot's color.
//
// Another place where we intentionally omit memory barriers is when
// accessing mheap_.arena_used to check if a pointer points into the
// heap. On relaxed memory machines, it's possible for a mutator to
// extend the size of the heap by updating arena_used, allocate an
// object from this new region, and publish a pointer to that object,
// but for tracing running on another processor to observe the pointer
// but use the old value of arena_used. In this case, tracing will not
// mark the object, even though it's reachable. However, the mutator
// is guaranteed to execute a write barrier when it publishes the
// pointer, so it will take care of marking the object. A general
// consequence of this is that the garbage collector may cache the
// value of mheap_.arena_used. (See issue #9984.)
//
// Stack writes:
//
// The compiler omits write barriers for writes to the current frame,
// but if a stack pointer has been passed down the call stack, the
// compiler will generate a write barrier for writes through that
// pointer (because it doesn't know it's not a heap pointer).
//
// One might be tempted to ignore the write barrier if slot points
// into the stack. Don't do it! Mark termination only re-scans
// frames that have potentially been active since the concurrent scan,
// so it depends on write barriers to track changes to pointers in
// stack frames that have not been active.
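//
// For example (an illustrative sketch only; f, g, and T are made-up
// names, not code from this file), the compiler cannot tell that the
// slot written in g lives on f's stack:
//
//     func f() {
//         var x *T
//         g(&x) // a pointer into f's frame escapes down the call stack
//     }
//
//     func g(p **T) {
//         *p = new(T) // gets a write barrier: p might point into the heap
//     }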
//
// Global writes:
//
// The Go garbage collector requires write barriers when heap pointers
// are stored in globals. Many garbage collectors ignore writes to
// globals and instead pick up global -> heap pointers during
// termination. This increases pause time, so we instead rely on write
// barriers for writes to globals so that we don't have to rescan
// globals during mark termination.
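//
// As a minimal illustration (globalHead and node are hypothetical
// names), a store such as
//
//     globalHead = &node{next: globalHead}
//
// installs a heap pointer in a global and therefore goes through the
// write barrier, which is what lets mark termination skip rescanning
// globals.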
//
// Publication ordering:
//
// The write barrier is *pre-publication*, meaning that the write
// barrier happens prior to the *slot = ptr write that may make ptr
// reachable by some goroutine that currently cannot reach it.
//
// Signal handler pointer writes:
//
// In general, the signal handler cannot safely invoke the write
// barrier because it may run without a P or even during the write
// barrier.
//
// There is exactly one exception: profbuf.go omits a barrier during
// signal handler profile logging. That's safe only because of the
// deletion barrier. See profbuf.go for a detailed argument. If we
// remove the deletion barrier, we'll have to work out a new way to
// handle the profile logging.

// typedmemmove copies a value of type t to dst from src.
// Must be nosplit, see #16026.
//
// TODO: Perfect for go:nosplitrec since we can't have a safe point
// anywhere in the bulk barrier or memmove.
//
//go:nosplit
func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	if dst == src {
		return
	}
	if typ.ptrdata != 0 {
		bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.size)
	}
	// There's a race here: if some other goroutine can write to
	// src, it may change some pointer in src after we've
	// performed the write barrier but before we perform the
	// memory copy. This is safe because the write performed by that
	// other goroutine must also be accompanied by a write
	// barrier, so at worst we've unnecessarily greyed the old
	// pointer that was in src.
	memmove(dst, src, typ.size)
	if writeBarrier.cgo {
		cgoCheckMemmove(typ, dst, src, 0, typ.size)
	}
}

//go:linkname reflect_typedmemmove reflect.typedmemmove
func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	if raceenabled {
		raceWriteObjectPC(typ, dst, getcallerpc(), funcPC(reflect_typedmemmove))
		raceReadObjectPC(typ, src, getcallerpc(), funcPC(reflect_typedmemmove))
	}
	if msanenabled {
		msanwrite(dst, typ.size)
		msanread(src, typ.size)
	}
	typedmemmove(typ, dst, src)
}

//go:linkname reflectlite_typedmemmove internal..z2freflectlite.typedmemmove
func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	reflect_typedmemmove(typ, dst, src)
}

// typedmemmovepartial is like typedmemmove but assumes that
// dst and src point off bytes into the value and only copies size bytes.
//go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
	if writeBarrier.needed && typ.ptrdata != 0 && size >= sys.PtrSize {
		// Pointer-align start address for bulk barrier.
		adst, asrc, asize := dst, src, size
		if frag := -off & (sys.PtrSize - 1); frag != 0 {
			adst = add(dst, frag)
			asrc = add(src, frag)
			asize -= frag
		}
		bulkBarrierPreWrite(uintptr(adst), uintptr(asrc), asize&^(sys.PtrSize-1))
	}

	memmove(dst, src, size)
	if writeBarrier.cgo {
		cgoCheckMemmove(typ, dst, src, off, size)
	}
}

//go:nosplit
func typedslicecopy(typ *_type, dst, src slice) int {
	n := dst.len
	if n > src.len {
		n = src.len
	}
	if n == 0 {
		return 0
	}

	dstp := dst.array
	srcp := src.array

	// The compiler emits calls to typedslicecopy before
	// instrumentation runs, so unlike the other copying and
	// assignment operations, it's not instrumented in the calling
	// code and needs its own instrumentation.
	if raceenabled {
		callerpc := getcallerpc()
		pc := funcPC(slicecopy)
		racewriterangepc(dstp, uintptr(n)*typ.size, callerpc, pc)
		racereadrangepc(srcp, uintptr(n)*typ.size, callerpc, pc)
	}
	if msanenabled {
		msanwrite(dstp, uintptr(n)*typ.size)
		msanread(srcp, uintptr(n)*typ.size)
	}

	if writeBarrier.cgo {
		cgoCheckSliceCopy(typ, dst, src, n)
	}

	if dstp == srcp {
		return n
	}

	// Note: No point in checking typ.ptrdata here:
	// compiler only emits calls to typedslicecopy for types with pointers,
	// and growslice and reflect_typedslicecopy check for pointers
	// before calling typedslicecopy.
	size := uintptr(n) * typ.size
	if writeBarrier.needed {
		bulkBarrierPreWrite(uintptr(dstp), uintptr(srcp), size)
	}
	// See typedmemmove for a discussion of the race between the
	// barrier and memmove.
	memmove(dstp, srcp, size)
	return n
}

//go:linkname reflect_typedslicecopy reflect.typedslicecopy
func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
	if elemType.ptrdata == 0 {
		n := dst.len
		if n > src.len {
			n = src.len
		}
		if n == 0 {
			return 0
		}

		size := uintptr(n) * elemType.size
		if raceenabled {
			callerpc := getcallerpc()
			pc := funcPC(reflect_typedslicecopy)
			racewriterangepc(dst.array, size, callerpc, pc)
			racereadrangepc(src.array, size, callerpc, pc)
		}
		if msanenabled {
			msanwrite(dst.array, size)
			msanread(src.array, size)
		}

		memmove(dst.array, src.array, size)
		return n
	}
	return typedslicecopy(elemType, dst, src)
}

// typedmemclr clears the typed memory at ptr with type typ. The
// memory at ptr must already be initialized (and hence in type-safe
// state). If the memory is being initialized for the first time, see
// memclrNoHeapPointers.
//
// If the caller knows that typ has pointers, it can alternatively
// call memclrHasPointers.
//
//go:nosplit
func typedmemclr(typ *_type, ptr unsafe.Pointer) {
	if typ.ptrdata != 0 {
		bulkBarrierPreWrite(uintptr(ptr), 0, typ.size)
	}
	memclrNoHeapPointers(ptr, typ.size)
}

//go:linkname reflect_typedmemclr reflect.typedmemclr
func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) {
	typedmemclr(typ, ptr)
}

//go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial
func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) {
	if typ.ptrdata != 0 {
		bulkBarrierPreWrite(uintptr(ptr), 0, size)
	}
	memclrNoHeapPointers(ptr, size)
}

// memclrHasPointers clears n bytes of typed memory starting at ptr.
// The caller must ensure that the type of the object at ptr has
// pointers, usually by checking typ.ptrdata. However, ptr
// does not have to point to the start of the allocation.
//
//go:nosplit
func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
	bulkBarrierPreWrite(uintptr(ptr), 0, n)
	memclrNoHeapPointers(ptr, n)
}