* builtins.def (BUILT_IN_SETJMP): Revert latest change.
[official-gcc.git] / libgo / go / runtime / mbarrier.go
blobd54016f0ba9c635d9d8e5b03b41ce1d4bc3839db
1 // Copyright 2015 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Garbage collector: write barriers.
6 //
7 // For the concurrent garbage collector, the Go compiler implements
8 // updates to pointer-valued fields that may be in heap objects by
9 // emitting calls to write barriers. This file contains the actual write barrier
10 // implementation, gcmarkwb_m, and the various wrappers called by the
11 // compiler to implement pointer assignment, slice assignment,
12 // typed memmove, and so on.
14 package runtime
16 import (
17 "runtime/internal/sys"
18 "unsafe"
21 // For gccgo, use go:linkname to rename compiler-called functions to
22 // themselves, so that the compiler will export them.
24 //go:linkname writebarrierptr runtime.writebarrierptr
25 //go:linkname typedmemmove runtime.typedmemmove
26 //go:linkname typedslicecopy runtime.typedslicecopy
28 // gcmarkwb_m is the mark-phase write barrier, the only barrier we have.
29 // The rest of this file exists only to make calls to this function.
31 // This is a hybrid barrier that combines a Yuasa-style deletion
32 // barrier—which shades the object whose reference is being
33 // overwritten—with a Dijkstra-style insertion barrier—which shades the object
34 // whose reference is being written. The insertion part of the barrier
35 // is necessary while the calling goroutine's stack is grey. In
36 // pseudocode, the barrier is:
38 // writePointer(slot, ptr):
39 // shade(*slot)
40 // if current stack is grey:
41 // shade(ptr)
42 // *slot = ptr
44 // slot is the destination in Go code.
45 // ptr is the value that goes into the slot in Go code.
47 // Shade indicates that it has seen a white pointer by adding the referent
48 // to wbuf as well as marking it.
50 // The two shades and the condition work together to prevent a mutator
51 // from hiding an object from the garbage collector:
53 // 1. shade(*slot) prevents a mutator from hiding an object by moving
54 // the sole pointer to it from the heap to its stack. If it attempts
55 // to unlink an object from the heap, this will shade it.
57 // 2. shade(ptr) prevents a mutator from hiding an object by moving
58 // the sole pointer to it from its stack into a black object in the
59 // heap. If it attempts to install the pointer into a black object,
60 // this will shade it.
62 // 3. Once a goroutine's stack is black, the shade(ptr) becomes
63 // unnecessary. shade(ptr) prevents hiding an object by moving it from
64 // the stack to the heap, but this requires first having a pointer
65 // hidden on the stack. Immediately after a stack is scanned, it only
66 // points to shaded objects, so it's not hiding anything, and the
67 // shade(*slot) prevents it from hiding any other pointers on its
68 // stack.
70 // For a detailed description of this barrier and proof of
71 // correctness, see https://github.com/golang/proposal/blob/master/design/17503-eliminate-rescan.md
75 // Dealing with memory ordering:
77 // Both the Yuasa and Dijkstra barriers can be made conditional on the
78 // color of the object containing the slot. We chose not to make these
79 // conditional because the cost of ensuring that the object holding
80 // the slot doesn't concurrently change color without the mutator
81 // noticing seems prohibitive.
83 // Consider the following example where the mutator writes into
84 // a slot and then loads the slot's mark bit while the GC thread
85 // writes to the slot's mark bit and then as part of scanning reads
86 // the slot.
88 // Initially both [slot] and [slotmark] are 0 (nil)
89 // Mutator thread GC thread
90 // st [slot], ptr st [slotmark], 1
92 // ld r1, [slotmark] ld r2, [slot]
94 // Without an expensive memory barrier between the st and the ld, the final
95 // result on most HW (including 386/amd64) can be r1==r2==0. This is a classic
96 // example of what can happen when loads are allowed to be reordered with older
97 // stores (avoiding such reorderings lies at the heart of the classic
98 // Peterson/Dekker algorithms for mutual exclusion). Rather than require memory
99 // barriers, which will slow down both the mutator and the GC, we always grey
100 // the ptr object regardless of the slot's color.
102 // Another place where we intentionally omit memory barriers is when
103 // accessing mheap_.arena_used to check if a pointer points into the
104 // heap. On relaxed memory machines, it's possible for a mutator to
105 // extend the size of the heap by updating arena_used, allocate an
106 // object from this new region, and publish a pointer to that object,
107 // but for tracing running on another processor to observe the pointer
108 // but use the old value of arena_used. In this case, tracing will not
109 // mark the object, even though it's reachable. However, the mutator
110 // is guaranteed to execute a write barrier when it publishes the
111 // pointer, so it will take care of marking the object. A general
112 // consequence of this is that the garbage collector may cache the
113 // value of mheap_.arena_used. (See issue #9984.)
116 // Stack writes:
118 // The compiler omits write barriers for writes to the current frame,
119 // but if a stack pointer has been passed down the call stack, the
120 // compiler will generate a write barrier for writes through that
121 // pointer (because it doesn't know it's not a heap pointer).
123 // One might be tempted to ignore the write barrier if slot points
124 // into the stack. Don't do it! Mark termination only re-scans
125 // frames that have potentially been active since the concurrent scan,
126 // so it depends on write barriers to track changes to pointers in
127 // stack frames that have not been active.
130 // Global writes:
132 // The Go garbage collector requires write barriers when heap pointers
133 // are stored in globals. Many garbage collectors ignore writes to
134 // globals and instead pick up global -> heap pointers during
135 // termination. This increases pause time, so we instead rely on write
136 // barriers for writes to globals so that we don't have to rescan
137 // globals during mark termination.
140 // Publication ordering:
142 // The write barrier is *pre-publication*, meaning that the write
143 // barrier happens prior to the *slot = ptr write that may make ptr
144 // reachable by some goroutine that currently cannot reach it.
147 //go:nowritebarrierrec
148 //go:systemstack
149 func gcmarkwb_m(slot *uintptr, ptr uintptr) {
150 if writeBarrier.needed {
151 // Note: This turns bad pointer writes into bad
152 // pointer reads, which could be confusing. We avoid
153 // reading from obviously bad pointers, which should
154 // take care of the vast majority of these. We could
155 // patch this up in the signal handler, or use XCHG to
156 // combine the read and the write. Checking inheap is
157 // insufficient since we need to track changes to
158 // roots outside the heap.
160 // Note: profbuf.go omits a barrier during signal handler
161 // profile logging; that's safe only because this deletion barrier exists.
162 // If we remove the deletion barrier, we'll have to work out
163 // a new way to handle the profile logging.
164 if slot1 := uintptr(unsafe.Pointer(slot)); slot1 >= minPhysPageSize {
165 if optr := *slot; optr != 0 {
166 shade(optr)
169 // TODO: Make this conditional on the caller's stack color.
170 if ptr != 0 && inheap(ptr) {
171 shade(ptr)
176 // writebarrierptr_prewrite1 invokes a write barrier for *dst = src
177 // prior to the write happening.
179 // Write barrier calls must not happen during critical GC and scheduler
180 // related operations. In particular there are times when the GC assumes
181 // that the world is stopped but scheduler related code is still being
182 // executed, dealing with syscalls, dealing with putting gs on runnable
183 // queues and so forth. This code cannot execute write barriers because
184 // the GC might drop them on the floor. Stopping the world involves removing
185 // the p associated with an m. We use the fact that m.p == nil to indicate
186 // that we are in one these critical section and throw if the write is of
187 // a pointer to a heap object.
188 //go:nosplit
189 func writebarrierptr_prewrite1(dst *uintptr, src uintptr) {
190 mp := acquirem()
191 if mp.inwb || mp.dying > 0 {
192 releasem(mp)
193 return
195 systemstack(func() {
196 if mp.p == 0 && memstats.enablegc && !mp.inwb && inheap(src) {
197 throw("writebarrierptr_prewrite1 called with mp.p == nil")
199 mp.inwb = true
200 gcmarkwb_m(dst, src)
202 mp.inwb = false
203 releasem(mp)
206 // NOTE: Really dst *unsafe.Pointer, src unsafe.Pointer,
207 // but if we do that, Go inserts a write barrier on *dst = src.
208 //go:nosplit
209 func writebarrierptr(dst *uintptr, src uintptr) {
210 if writeBarrier.cgo {
211 cgoCheckWriteBarrier(dst, src)
213 if !writeBarrier.needed {
214 *dst = src
215 return
217 if src != 0 && src < minPhysPageSize {
218 systemstack(func() {
219 print("runtime: writebarrierptr *", dst, " = ", hex(src), "\n")
220 throw("bad pointer in write barrier")
223 writebarrierptr_prewrite1(dst, src)
224 *dst = src
227 // writebarrierptr_prewrite is like writebarrierptr, but the store
228 // will be performed by the caller after this call. The caller must
229 // not allow preemption between this call and the write.
231 //go:nosplit
232 func writebarrierptr_prewrite(dst *uintptr, src uintptr) {
233 if writeBarrier.cgo {
234 cgoCheckWriteBarrier(dst, src)
236 if !writeBarrier.needed {
237 return
239 if src != 0 && src < minPhysPageSize {
240 systemstack(func() { throw("bad pointer in write barrier") })
242 writebarrierptr_prewrite1(dst, src)
245 // typedmemmove copies a value of type t to dst from src.
246 // Must be nosplit, see #16026.
247 //go:nosplit
248 func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
249 if typ.kind&kindNoPointers == 0 {
250 bulkBarrierPreWrite(uintptr(dst), uintptr(src), typ.size)
252 // There's a race here: if some other goroutine can write to
253 // src, it may change some pointer in src after we've
254 // performed the write barrier but before we perform the
255 // memory copy. This safe because the write performed by that
256 // other goroutine must also be accompanied by a write
257 // barrier, so at worst we've unnecessarily greyed the old
258 // pointer that was in src.
259 memmove(dst, src, typ.size)
260 if writeBarrier.cgo {
261 cgoCheckMemmove(typ, dst, src, 0, typ.size)
265 //go:linkname reflect_typedmemmove reflect.typedmemmove
266 func reflect_typedmemmove(typ *_type, dst, src unsafe.Pointer) {
267 if raceenabled {
268 raceWriteObjectPC(typ, dst, getcallerpc(unsafe.Pointer(&typ)), funcPC(reflect_typedmemmove))
269 raceReadObjectPC(typ, src, getcallerpc(unsafe.Pointer(&typ)), funcPC(reflect_typedmemmove))
271 if msanenabled {
272 msanwrite(dst, typ.size)
273 msanread(src, typ.size)
275 typedmemmove(typ, dst, src)
278 // typedmemmovepartial is like typedmemmove but assumes that
279 // dst and src point off bytes into the value and only copies size bytes.
280 //go:linkname reflect_typedmemmovepartial reflect.typedmemmovepartial
281 func reflect_typedmemmovepartial(typ *_type, dst, src unsafe.Pointer, off, size uintptr) {
282 if writeBarrier.needed && typ.kind&kindNoPointers == 0 && size >= sys.PtrSize {
283 // Pointer-align start address for bulk barrier.
284 adst, asrc, asize := dst, src, size
285 if frag := -off & (sys.PtrSize - 1); frag != 0 {
286 adst = add(dst, frag)
287 asrc = add(src, frag)
288 asize -= frag
290 bulkBarrierPreWrite(uintptr(adst), uintptr(asrc), asize&^(sys.PtrSize-1))
293 memmove(dst, src, size)
294 if writeBarrier.cgo {
295 cgoCheckMemmove(typ, dst, src, off, size)
299 //go:nosplit
300 func typedslicecopy(typ *_type, dst, src slice) int {
301 // TODO(rsc): If typedslicecopy becomes faster than calling
302 // typedmemmove repeatedly, consider using during func growslice.
303 n := dst.len
304 if n > src.len {
305 n = src.len
307 if n == 0 {
308 return 0
310 dstp := dst.array
311 srcp := src.array
313 if raceenabled {
314 callerpc := getcallerpc(unsafe.Pointer(&typ))
315 pc := funcPC(slicecopy)
316 racewriterangepc(dstp, uintptr(n)*typ.size, callerpc, pc)
317 racereadrangepc(srcp, uintptr(n)*typ.size, callerpc, pc)
319 if msanenabled {
320 msanwrite(dstp, uintptr(n)*typ.size)
321 msanread(srcp, uintptr(n)*typ.size)
324 if writeBarrier.cgo {
325 cgoCheckSliceCopy(typ, dst, src, n)
328 // Note: No point in checking typ.kind&kindNoPointers here:
329 // compiler only emits calls to typedslicecopy for types with pointers,
330 // and growslice and reflect_typedslicecopy check for pointers
331 // before calling typedslicecopy.
332 if !writeBarrier.needed {
333 memmove(dstp, srcp, uintptr(n)*typ.size)
334 return n
337 systemstack(func() {
338 if uintptr(srcp) < uintptr(dstp) && uintptr(srcp)+uintptr(n)*typ.size > uintptr(dstp) {
339 // Overlap with src before dst.
340 // Copy backward, being careful not to move dstp/srcp
341 // out of the array they point into.
342 dstp = add(dstp, uintptr(n-1)*typ.size)
343 srcp = add(srcp, uintptr(n-1)*typ.size)
344 i := 0
345 for {
346 typedmemmove(typ, dstp, srcp)
347 if i++; i >= n {
348 break
350 dstp = add(dstp, -typ.size)
351 srcp = add(srcp, -typ.size)
353 } else {
354 // Copy forward, being careful not to move dstp/srcp
355 // out of the array they point into.
356 i := 0
357 for {
358 typedmemmove(typ, dstp, srcp)
359 if i++; i >= n {
360 break
362 dstp = add(dstp, typ.size)
363 srcp = add(srcp, typ.size)
367 return n
370 //go:linkname reflect_typedslicecopy reflect.typedslicecopy
371 func reflect_typedslicecopy(elemType *_type, dst, src slice) int {
372 if elemType.kind&kindNoPointers != 0 {
373 n := dst.len
374 if n > src.len {
375 n = src.len
377 if n == 0 {
378 return 0
381 size := uintptr(n) * elemType.size
382 if raceenabled {
383 callerpc := getcallerpc(unsafe.Pointer(&elemType))
384 pc := funcPC(reflect_typedslicecopy)
385 racewriterangepc(dst.array, size, callerpc, pc)
386 racereadrangepc(src.array, size, callerpc, pc)
388 if msanenabled {
389 msanwrite(dst.array, size)
390 msanread(src.array, size)
393 memmove(dst.array, src.array, size)
394 return n
396 return typedslicecopy(elemType, dst, src)
399 // typedmemclr clears the typed memory at ptr with type typ. The
400 // memory at ptr must already be initialized (and hence in type-safe
401 // state). If the memory is being initialized for the first time, see
402 // memclrNoHeapPointers.
404 // If the caller knows that typ has pointers, it can alternatively
405 // call memclrHasPointers.
407 //go:nosplit
408 func typedmemclr(typ *_type, ptr unsafe.Pointer) {
409 if typ.kind&kindNoPointers == 0 {
410 bulkBarrierPreWrite(uintptr(ptr), 0, typ.size)
412 memclrNoHeapPointers(ptr, typ.size)
415 // memclrHasPointers clears n bytes of typed memory starting at ptr.
416 // The caller must ensure that the type of the object at ptr has
417 // pointers, usually by checking typ.kind&kindNoPointers. However, ptr
418 // does not have to point to the start of the allocation.
420 //go:nosplit
421 func memclrHasPointers(ptr unsafe.Pointer, n uintptr) {
422 bulkBarrierPreWrite(uintptr(ptr), 0, n)
423 memclrNoHeapPointers(ptr, n)