libgo: update to go1.9
[official-gcc.git] / libgo / go / runtime / mbitmap.go
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Garbage collector: type and heap bitmaps.
6 //
7 // Stack, data, and bss bitmaps
8 //
9 // Stack frames and global variables in the data and bss sections are described
10 // by 1-bit bitmaps in which 0 means uninteresting and 1 means live pointer
11 // to be visited during GC. The bits in each byte are consumed starting with
12 // the low bit: 1<<0, 1<<1, and so on.
14 // Heap bitmap
16 // The allocated heap comes from a subset of the memory in the range [start, used),
17 // where start == mheap_.arena_start and used == mheap_.arena_used.
18 // The heap bitmap comprises 2 bits for each pointer-sized word in that range,
19 // stored in bytes indexed backward in memory from start.
20 // That is, the byte at address start-1 holds the 2-bit entries for the four words
21 // start through start+3*ptrSize, the byte at start-2 holds the entries for
22 // start+4*ptrSize through start+7*ptrSize, and so on.
24 // In each 2-bit entry, the lower bit holds the same information as in the 1-bit
25 // bitmaps: 0 means uninteresting and 1 means live pointer to be visited during GC.
26 // The meaning of the high bit depends on the position of the word being described
27 // in its allocated object. In all words *except* the second word, the
28 // high bit indicates that the object is still being described. In
29 // these words, if a bit pair with a high bit 0 is encountered, the
30 // low bit can also be assumed to be 0, and the object description is
31 // over. This 00 is called the ``dead'' encoding: it signals that the
32 // rest of the words in the object are uninteresting to the garbage
33 // collector.
35 // In the second word, the high bit is the GC ``checkmarked'' bit (see below).
37 // The 2-bit entries are split when written into the byte, so that the top half
38 // of the byte contains 4 high bits and the bottom half contains 4 low (pointer)
39 // bits.
40 // This form allows a copy from the 1-bit to the 4-bit form to keep the
41 // pointer bits contiguous, instead of having to space them out.
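//
// For example, on a 64-bit system the heap word with index 13 (the word at
// address start+13*ptrSize) is described by the byte at start - 13/4 - 1 = start-4;
// its pointer bit is bit 13%4 = 1 of the low nibble and its scan bit is bit 1 of
// the high nibble, i.e. the masks 1<<1 and 1<<(4+1) within that byte.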
43 // The code makes use of the fact that the zero value for a heap bitmap
44 // has no live pointer bit set and is (depending on position), not used,
45 // not checkmarked, and is the dead encoding.
46 // These properties must be preserved when modifying the encoding.
48 // The bitmap for noscan spans is not maintained. Code must ensure
49 // that an object is scannable before consulting its bitmap by
50 // checking either the noscan bit in the span or by consulting its
51 // type's information.
53 // Checkmarks
55 // In a concurrent garbage collector, one worries about failing to mark
56 // a live object due to mutations without write barriers or bugs in the
57 // collector implementation. As a sanity check, the GC has a 'checkmark'
58 // mode that retraverses the object graph with the world stopped, to make
59 // sure that everything that should be marked is marked.
60 // In checkmark mode, in the heap bitmap, the high bit of the 2-bit entry
61 // for the second word of the object holds the checkmark bit.
62 // When not in checkmark mode, this bit is set to 1.
64 // The smallest possible allocation is 8 bytes. On a 32-bit machine, that
65 // means every allocated object has two words, so there is room for the
66 // checkmark bit. On a 64-bit machine, however, the 8-byte allocation is
67 // just one word, so the second bit pair is not available for encoding the
68 // checkmark. However, because non-pointer allocations are combined
69 // into larger 16-byte (maxTinySize) allocations, a plain 8-byte allocation
70 // must be a pointer, so the type bit in the first word is not actually needed.
71 // It is still used in general, except in checkmark the type bit is repurposed
72 // as the checkmark bit and then reinitialized (to 1) as the type bit when
73 // finished.
76 package runtime
78 import (
79 "runtime/internal/atomic"
80 "runtime/internal/sys"
81 "unsafe"
84 const (
85 bitPointer = 1 << 0
86 bitScan = 1 << 4
88 heapBitsShift = 1 // shift offset between successive bitPointer or bitScan entries
89 heapBitmapScale = sys.PtrSize * (8 / 2) // number of data bytes described by one heap bitmap byte
91 // all scan/pointer bits in a byte
92 bitScanAll = bitScan | bitScan<<heapBitsShift | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
93 bitPointerAll = bitPointer | bitPointer<<heapBitsShift | bitPointer<<(2*heapBitsShift) | bitPointer<<(3*heapBitsShift)
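// For instance, bitPointerAll == 0x0f and bitScanAll == 0xf0, so a bitmap byte of
// 0xff marks all four words it describes as live pointers that are still part of
// their objects, and heapBitmapScale == 32 on 64-bit systems (one bitmap byte
// describes 4 words == 32 bytes of heap).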
96 // addb returns the byte pointer p+n.
97 //go:nowritebarrier
98 //go:nosplit
99 func addb(p *byte, n uintptr) *byte {
100 // Note: wrote out full expression instead of calling add(p, n)
101 // to reduce the number of temporaries generated by the
102 // compiler for this trivial expression during inlining.
103 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n))
106 // subtractb returns the byte pointer p-n.
107 // subtractb is typically used when traversing the pointer tables referred to by hbits
108 // which are arranged in reverse order.
109 //go:nowritebarrier
110 //go:nosplit
111 func subtractb(p *byte, n uintptr) *byte {
112 // Note: wrote out full expression instead of calling add(p, -n)
113 // to reduce the number of temporaries generated by the
114 // compiler for this trivial expression during inlining.
115 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - n))
118 // add1 returns the byte pointer p+1.
119 //go:nowritebarrier
120 //go:nosplit
121 func add1(p *byte) *byte {
122 // Note: wrote out full expression instead of calling addb(p, 1)
123 // to reduce the number of temporaries generated by the
124 // compiler for this trivial expression during inlining.
125 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1))
128 // subtract1 returns the byte pointer p-1.
129 // subtract1 is typically used when traversing the pointer tables referred to by hbits
130 // which are arranged in reverse order.
131 //go:nowritebarrier
133 // nosplit because it is used during write barriers and must not be preempted.
134 //go:nosplit
135 func subtract1(p *byte) *byte {
136 // Note: wrote out full expression instead of calling subtractb(p, 1)
137 // to reduce the number of temporaries generated by the
138 // compiler for this trivial expression during inlining.
139 return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
142 // mapBits maps any additional bitmap memory needed for the new arena memory.
144 // Don't call this directly. Call mheap.setArenaUsed.
146 //go:nowritebarrier
147 func (h *mheap) mapBits(arena_used uintptr) {
148 // Caller has added extra mappings to the arena.
149 // Add extra mappings of bitmap words as needed.
150 // We allocate extra bitmap pieces in chunks of bitmapChunk.
151 const bitmapChunk = 8192
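// For example, growing arena_used by 1MB on a 64-bit system needs
// 1MB/heapBitmapScale = 32KB of additional bitmap; that figure is then rounded
// up to a multiple of bitmapChunk and of physPageSize below before being
// compared with bitmap_mapped.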
153 n := (arena_used - mheap_.arena_start) / heapBitmapScale
154 n = round(n, bitmapChunk)
155 n = round(n, physPageSize)
156 if h.bitmap_mapped >= n {
157 return
160 sysMap(unsafe.Pointer(h.bitmap-n), n-h.bitmap_mapped, h.arena_reserved, &memstats.gc_sys)
161 h.bitmap_mapped = n
164 // heapBits provides access to the bitmap bits for a single heap word.
165 // The methods on heapBits take value receivers so that the compiler
166 // can more easily inline calls to those methods and registerize the
167 // struct fields independently.
168 type heapBits struct {
169 bitp *uint8
170 shift uint32
173 // markBits provides access to the mark bit for an object in the heap.
174 // bytep points to the byte holding the mark bit.
175 // mask is a byte with a single bit set that can be &ed with *bytep
176 // to see if the bit has been set.
177 // *m.byte&m.mask != 0 indicates the mark bit is set.
178 // index can be used along with span information to generate
179 // the address of the object in the heap.
180 // We maintain one set of mark bits for allocation and one for
181 // marking purposes.
182 type markBits struct {
183 bytep *uint8
184 mask uint8
185 index uintptr
188 //go:nosplit
189 func (s *mspan) allocBitsForIndex(allocBitIndex uintptr) markBits {
190 bytep, mask := s.allocBits.bitp(allocBitIndex)
191 return markBits{bytep, mask, allocBitIndex}
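// For example (with bitp selecting byte n/8 and bit n%8 of the bitmap),
// allocBitsForIndex(13) yields bytep pointing at byte 1 of s.allocBits and mask
// 1<<5 = 0x20, so *bytep&mask != 0 reports whether object 13 has been allocated.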
194 // refillAllocCache takes 8 bytes of s.allocBits starting at whichByte
195 // and negates them so that ctz (count trailing zeros) instructions
196 // can be used. It then places these 8 bytes into the cached 64 bit
197 // s.allocCache.
198 func (s *mspan) refillAllocCache(whichByte uintptr) {
199 bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(whichByte)))
200 aCache := uint64(0)
201 aCache |= uint64(bytes[0])
202 aCache |= uint64(bytes[1]) << (1 * 8)
203 aCache |= uint64(bytes[2]) << (2 * 8)
204 aCache |= uint64(bytes[3]) << (3 * 8)
205 aCache |= uint64(bytes[4]) << (4 * 8)
206 aCache |= uint64(bytes[5]) << (5 * 8)
207 aCache |= uint64(bytes[6]) << (6 * 8)
208 aCache |= uint64(bytes[7]) << (7 * 8)
209 s.allocCache = ^aCache
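// For example, if byte 0 of s.allocBits is 0x07 (objects 0-2 already allocated) and
// bytes 1-7 are zero, refillAllocCache(0) leaves s.allocCache == ^uint64(7), so
// sys.Ctz64(s.allocCache) == 3, the index of the next free object.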
212 // nextFreeIndex returns the index of the next free object in s at
213 // or after s.freeindex.
214 // There are hardware instructions that can be used to make this
215 // faster if profiling warrants it.
216 func (s *mspan) nextFreeIndex() uintptr {
217 sfreeindex := s.freeindex
218 snelems := s.nelems
219 if sfreeindex == snelems {
220 return sfreeindex
222 if sfreeindex > snelems {
223 throw("s.freeindex > s.nelems")
226 aCache := s.allocCache
228 bitIndex := sys.Ctz64(aCache)
229 for bitIndex == 64 {
230 // Move index to start of next cached bits.
231 sfreeindex = (sfreeindex + 64) &^ (64 - 1)
232 if sfreeindex >= snelems {
233 s.freeindex = snelems
234 return snelems
236 whichByte := sfreeindex / 8
237 // Refill s.allocCache with the next 64 alloc bits.
238 s.refillAllocCache(whichByte)
239 aCache = s.allocCache
240 bitIndex = sys.Ctz64(aCache)
241 // nothing available in cached bits
242 // grab the next 8 bytes and try again.
244 result := sfreeindex + uintptr(bitIndex)
245 if result >= snelems {
246 s.freeindex = snelems
247 return snelems
250 s.allocCache >>= uint(bitIndex + 1)
251 sfreeindex = result + 1
253 if sfreeindex%64 == 0 && sfreeindex != snelems {
254 // We just incremented s.freeindex so it isn't 0.
255 // As each 1 in s.allocCache was encountered and used for allocation
256 // it was shifted away. At this point s.allocCache contains all 0s.
257 // Refill s.allocCache so that it corresponds
258 // to the bits at s.allocBits starting at s.freeindex.
259 whichByte := sfreeindex / 8
260 s.refillAllocCache(whichByte)
262 s.freeindex = sfreeindex
263 return result
266 // isFree returns whether the index'th object in s is unallocated.
267 func (s *mspan) isFree(index uintptr) bool {
268 if index < s.freeindex {
269 return false
271 bytep, mask := s.allocBits.bitp(index)
272 return *bytep&mask == 0
275 func (s *mspan) objIndex(p uintptr) uintptr {
276 byteOffset := p - s.base()
277 if byteOffset == 0 {
278 return 0
280 if s.baseMask != 0 {
281 // s.baseMask is non-0; elemsize is a power of two, so shift by s.divShift
282 return byteOffset >> s.divShift
284 return uintptr(((uint64(byteOffset) >> s.divShift) * uint64(s.divMul)) >> s.divShift2)
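// For spans whose elemsize is not a power of two, divMul, divShift and divShift2 are
// precomputed when the span is set up so that the expression above equals
// byteOffset/elemsize for every offset that can occur in the span, replacing a
// hardware divide with a multiply and two shifts.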
287 func markBitsForAddr(p uintptr) markBits {
288 s := spanOf(p)
289 objIndex := s.objIndex(p)
290 return s.markBitsForIndex(objIndex)
293 func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
294 bytep, mask := s.gcmarkBits.bitp(objIndex)
295 return markBits{bytep, mask, objIndex}
298 func (s *mspan) markBitsForBase() markBits {
299 return markBits{(*uint8)(s.gcmarkBits), uint8(1), 0}
302 // isMarked reports whether mark bit m is set.
303 func (m markBits) isMarked() bool {
304 return *m.bytep&m.mask != 0
307 // setMarked sets the marked bit in the markbits, atomically. Some compilers
308 // are not able to inline the atomic.Or8 function, so if it appears as a hot spot consider
309 // inlining it manually.
310 func (m markBits) setMarked() {
311 // Might be racing with other updates, so use atomic update always.
312 // We used to be clever here and use a non-atomic update in certain
313 // cases, but it's not worth the risk.
314 atomic.Or8(m.bytep, m.mask)
317 // setMarkedNonAtomic sets the marked bit in the markbits, non-atomically.
318 func (m markBits) setMarkedNonAtomic() {
319 *m.bytep |= m.mask
322 // clearMarked clears the marked bit in the markbits, atomically.
323 func (m markBits) clearMarked() {
324 // Might be racing with other updates, so use atomic update always.
325 // We used to be clever here and use a non-atomic update in certain
326 // cases, but it's not worth the risk.
327 atomic.And8(m.bytep, ^m.mask)
330 // markBitsForSpan returns the markBits for the span base address base.
331 func markBitsForSpan(base uintptr) (mbits markBits) {
332 if base < mheap_.arena_start || base >= mheap_.arena_used {
333 throw("markBitsForSpan: base out of range")
335 mbits = markBitsForAddr(base)
336 if mbits.mask != 1 {
337 throw("markBitsForSpan: unaligned start")
339 return mbits
342 // advance advances the markBits to the next object in the span.
343 func (m *markBits) advance() {
344 if m.mask == 1<<7 {
345 m.bytep = (*uint8)(unsafe.Pointer(uintptr(unsafe.Pointer(m.bytep)) + 1))
346 m.mask = 1
347 } else {
348 m.mask = m.mask << 1
350 m.index++
353 // heapBitsForAddr returns the heapBits for the address addr.
354 // The caller must have already checked that addr is in the range [mheap_.arena_start, mheap_.arena_used).
356 // nosplit because it is used during write barriers and must not be preempted.
357 //go:nosplit
358 func heapBitsForAddr(addr uintptr) heapBits {
359 // 2 bits per word, 4 pairs per byte, and a mask is hard coded.
360 off := (addr - mheap_.arena_start) / sys.PtrSize
361 return heapBits{(*uint8)(unsafe.Pointer(mheap_.bitmap - off/4 - 1)), uint32(off & 3)}
364 // heapBitsForSpan returns the heapBits for the span base address base.
365 func heapBitsForSpan(base uintptr) (hbits heapBits) {
366 if base < mheap_.arena_start || base >= mheap_.arena_used {
367 print("runtime: base ", hex(base), " not in range [", hex(mheap_.arena_start), ",", hex(mheap_.arena_used), ")\n")
368 throw("heapBitsForSpan: base out of range")
370 return heapBitsForAddr(base)
373 // heapBitsForObject returns the base address for the heap object
374 // containing the address p, the heapBits for base,
375 // the object's span, and the index of the object in s.
376 // If p does not point into a heap object,
377 // return base == 0;
378 // otherwise return the base of the object.
380 // For gccgo, the forStack parameter is true if the value came from the stack.
381 // The stack is collected conservatively and may contain invalid pointers.
383 // refBase and refOff optionally give the base address of the object
384 // in which the pointer p was found and the byte offset at which it
385 // was found. These are used for error reporting.
386 func heapBitsForObject(p, refBase, refOff uintptr, forStack bool) (base uintptr, hbits heapBits, s *mspan, objIndex uintptr) {
387 arenaStart := mheap_.arena_start
388 if p < arenaStart || p >= mheap_.arena_used {
389 return
391 off := p - arenaStart
392 idx := off >> _PageShift
393 // p points into the heap, but possibly to the middle of an object.
394 // Consult the span table to find the block beginning.
395 s = mheap_.spans[idx]
396 if s == nil || p < s.base() || p >= s.limit || s.state != mSpanInUse {
397 if s == nil || s.state == _MSpanManual || forStack {
398 // If s is nil, the virtual address has never been part of the heap.
399 // This pointer may be to some mmap'd region, so we allow it.
400 // Pointers into stacks are also ok, the runtime manages these explicitly.
401 return
404 // The following ensures that we are rigorous about what data
405 // structures hold valid pointers.
406 if debug.invalidptr != 0 {
407 // Typically this indicates an incorrect use
408 // of unsafe or cgo to store a bad pointer in
409 // the Go heap. It may also indicate a runtime
410 // bug.
412 // TODO(austin): We could be more aggressive
413 // and detect pointers to unallocated objects
414 // in allocated spans.
415 printlock()
416 print("runtime: pointer ", hex(p))
417 if s.state != mSpanInUse {
418 print(" to unallocated span")
419 } else {
420 print(" to unused region of span")
422 print(" idx=", hex(idx), " span.base()=", hex(s.base()), " span.limit=", hex(s.limit), " span.state=", s.state, "\n")
423 if refBase != 0 {
424 print("runtime: found in object at *(", hex(refBase), "+", hex(refOff), ")\n")
425 gcDumpObject("object", refBase, refOff)
427 getg().m.traceback = 2
428 throw("found bad pointer in Go heap (incorrect use of unsafe or cgo?)")
430 return
433 if forStack {
434 // A span can be entered in mheap_.spans, and be set
435 // to mSpanInUse, before it is fully initialized.
436 // All we need in practice is allocBits and gcmarkBits,
437 // so make sure they are set.
438 if s.allocBits == nil || s.gcmarkBits == nil {
439 return
443 // If this span holds objects of a power-of-2 size, just mask off the bits to
444 // the interior of the object. Otherwise use the size to get the base.
445 if s.baseMask != 0 {
446 // optimize for power of 2 sized objects.
447 base = s.base()
448 base = base + (p-base)&uintptr(s.baseMask)
449 objIndex = (base - s.base()) >> s.divShift
450 // base = p & s.baseMask is faster for small spans,
451 // but doesn't work for large spans.
452 // Overall, it's faster to use the more general computation above.
453 } else {
454 base = s.base()
455 if p-base >= s.elemsize {
456 // n := (p - base) / s.elemsize, using division by multiplication
457 objIndex = uintptr(p-base) >> s.divShift * uintptr(s.divMul) >> s.divShift2
458 base += objIndex * s.elemsize
461 // Now that we know the actual base, compute heapBits to return to caller.
462 hbits = heapBitsForAddr(base)
463 return
466 // prefetch the bits.
467 func (h heapBits) prefetch() {
468 prefetchnta(uintptr(unsafe.Pointer((h.bitp))))
471 // next returns the heapBits describing the next pointer-sized word in memory.
472 // That is, if h describes address p, h.next() describes p+ptrSize.
473 // Note that next does not modify h. The caller must record the result.
475 // nosplit because it is used during write barriers and must not be preempted.
476 //go:nosplit
477 func (h heapBits) next() heapBits {
478 if h.shift < 3*heapBitsShift {
479 return heapBits{h.bitp, h.shift + heapBitsShift}
481 return heapBits{subtract1(h.bitp), 0}
484 // forward returns the heapBits describing n pointer-sized words ahead of h in memory.
485 // That is, if h describes address p, h.forward(n) describes p+n*ptrSize.
486 // h.forward(1) is equivalent to h.next(), just slower.
487 // Note that forward does not modify h. The caller must record the result.
488 // bits returns the heap bits for the current word.
489 func (h heapBits) forward(n uintptr) heapBits {
490 n += uintptr(h.shift) / heapBitsShift
491 return heapBits{subtractb(h.bitp, n/4), uint32(n%4) * heapBitsShift}
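// For example, if h.shift is 2 (h describes the third word covered by *h.bitp),
// h.forward(5) computes n = 5+2 = 7 and returns heapBits{subtractb(h.bitp, 1), 3}:
// one bitmap byte lower in memory (the bitmap grows downward) at shift 3.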
494 // The caller can test morePointers and isPointer by &-ing with bitScan and bitPointer.
495 // The result includes in its higher bits the bits for subsequent words
496 // described by the same bitmap byte.
497 func (h heapBits) bits() uint32 {
498 // The (shift & 31) eliminates a test and conditional branch
499 // from the generated code.
500 return uint32(*h.bitp) >> (h.shift & 31)
503 // morePointers returns true unless the bits are the ``dead'' encoding, meaning
504 // this word and all remaining words in this object are scalars.
505 // h must not describe the second word of the object.
506 func (h heapBits) morePointers() bool {
507 return h.bits()&bitScan != 0
510 // isPointer reports whether the heap bits describe a pointer word.
512 // nosplit because it is used during write barriers and must not be preempted.
513 //go:nosplit
514 func (h heapBits) isPointer() bool {
515 return h.bits()&bitPointer != 0
518 // isCheckmarked reports whether the heap bits have the checkmarked bit set.
519 // It must be told how large the object at h is, because the encoding of the
520 // checkmark bit varies by size.
521 // h must describe the initial word of the object.
522 func (h heapBits) isCheckmarked(size uintptr) bool {
523 if size == sys.PtrSize {
524 return (*h.bitp>>h.shift)&bitPointer != 0
526 // All multiword objects are 2-word aligned,
527 // so we know that the initial word's 2-bit pair
528 // and the second word's 2-bit pair are in the
529 // same heap bitmap byte, *h.bitp.
530 return (*h.bitp>>(heapBitsShift+h.shift))&bitScan != 0
533 // setCheckmarked sets the checkmarked bit.
534 // It must be told how large the object at h is, because the encoding of the
535 // checkmark bit varies by size.
536 // h must describe the initial word of the object.
537 func (h heapBits) setCheckmarked(size uintptr) {
538 if size == sys.PtrSize {
539 atomic.Or8(h.bitp, bitPointer<<h.shift)
540 return
542 atomic.Or8(h.bitp, bitScan<<(heapBitsShift+h.shift))
545 // bulkBarrierPreWrite executes writebarrierptr_prewrite1
546 // for every pointer slot in the memory range [src, src+size),
547 // using pointer/scalar information from [dst, dst+size).
548 // This executes the write barriers necessary before a memmove.
549 // src, dst, and size must be pointer-aligned.
550 // The range [dst, dst+size) must lie within a single object.
552 // As a special case, src == 0 indicates that this is being used for a
553 // memclr. bulkBarrierPreWrite will pass 0 for the src of each write
554 // barrier.
556 // Callers should call bulkBarrierPreWrite immediately before
557 // calling memmove(dst, src, size). This function is marked nosplit
558 // to avoid being preempted; the GC must not stop the goroutine
559 // between the memmove and the execution of the barriers.
560 // The caller is also responsible for cgo pointer checks if this
561 // may be writing Go pointers into non-Go memory.
563 // The pointer bitmap is not maintained for allocations containing
564 // no pointers at all; any caller of bulkBarrierPreWrite must first
565 // make sure the underlying allocation contains pointers, usually
566 // by checking typ.kind&kindNoPointers.
568 //go:nosplit
569 func bulkBarrierPreWrite(dst, src, size uintptr) {
570 if (dst|src|size)&(sys.PtrSize-1) != 0 {
571 throw("bulkBarrierPreWrite: unaligned arguments")
573 if !writeBarrier.needed {
574 return
576 if !inheap(dst) {
577 // If dst is a global, use the data or BSS bitmaps to
578 // execute write barriers.
579 roots := gcRoots
580 for roots != nil {
581 for i := 0; i < roots.count; i++ {
582 pr := roots.roots[i]
583 addr := uintptr(pr.decl)
584 if addr <= dst && dst < addr+pr.size {
585 if dst < addr+pr.ptrdata {
586 bulkBarrierBitmap(dst, src, size, dst-addr, pr.gcdata)
588 return
591 roots = roots.next
593 return
596 h := heapBitsForAddr(dst)
597 if src == 0 {
598 for i := uintptr(0); i < size; i += sys.PtrSize {
599 if h.isPointer() {
600 dstx := (*uintptr)(unsafe.Pointer(dst + i))
601 writebarrierptr_prewrite1(dstx, 0)
603 h = h.next()
605 } else {
606 for i := uintptr(0); i < size; i += sys.PtrSize {
607 if h.isPointer() {
608 dstx := (*uintptr)(unsafe.Pointer(dst + i))
609 srcx := (*uintptr)(unsafe.Pointer(src + i))
610 writebarrierptr_prewrite1(dstx, *srcx)
612 h = h.next()
617 // bulkBarrierBitmap executes write barriers for copying from [src,
618 // src+size) to [dst, dst+size) using a 1-bit pointer bitmap. src is
619 // assumed to start maskOffset bytes into the data covered by the
620 // bitmap in bits (which may not be a multiple of 8).
622 // This is used by bulkBarrierPreWrite for writes to data and BSS.
624 //go:nosplit
625 func bulkBarrierBitmap(dst, src, size, maskOffset uintptr, bits *uint8) {
626 word := maskOffset / sys.PtrSize
627 bits = addb(bits, word/8)
628 mask := uint8(1) << (word % 8)
630 for i := uintptr(0); i < size; i += sys.PtrSize {
631 if mask == 0 {
632 bits = addb(bits, 1)
633 if *bits == 0 {
634 // Skip 8 words.
635 i += 7 * sys.PtrSize
636 continue
638 mask = 1
640 if *bits&mask != 0 {
641 dstx := (*uintptr)(unsafe.Pointer(dst + i))
642 if src == 0 {
643 writebarrierptr_prewrite1(dstx, 0)
644 } else {
645 srcx := (*uintptr)(unsafe.Pointer(src + i))
646 writebarrierptr_prewrite1(dstx, *srcx)
649 mask <<= 1
653 // typeBitsBulkBarrier executes writebarrierptr_prewrite for every
654 // pointer that would be copied from [src, src+size) to [dst,
655 // dst+size) by a memmove using the type bitmap to locate those
656 // pointer slots.
658 // The type typ must correspond exactly to [src, src+size) and [dst, dst+size).
659 // dst, src, and size must be pointer-aligned.
660 // The type typ must have a plain bitmap, not a GC program.
661 // The only use of this function is in channel sends, and the
662 // 64 kB channel element limit takes care of this for us.
664 // Must not be preempted because it typically runs right before memmove,
665 // and the GC must observe the barriers and the memmove as a single atomic action.
667 //go:nosplit
668 func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) {
669 if typ == nil {
670 throw("runtime: typeBitsBulkBarrier without type")
672 if typ.size != size {
673 println("runtime: typeBitsBulkBarrier with type ", *typ.string, " of size ", typ.size, " but memory size", size)
674 throw("runtime: invalid typeBitsBulkBarrier")
676 if typ.kind&kindGCProg != 0 {
677 println("runtime: typeBitsBulkBarrier with type ", *typ.string, " with GC prog")
678 throw("runtime: invalid typeBitsBulkBarrier")
680 if !writeBarrier.needed {
681 return
683 ptrmask := typ.gcdata
684 var bits uint32
685 for i := uintptr(0); i < typ.ptrdata; i += sys.PtrSize {
686 if i&(sys.PtrSize*8-1) == 0 {
687 bits = uint32(*ptrmask)
688 ptrmask = addb(ptrmask, 1)
689 } else {
690 bits = bits >> 1
692 if bits&1 != 0 {
693 dstx := (*uintptr)(unsafe.Pointer(dst + i))
694 srcx := (*uintptr)(unsafe.Pointer(src + i))
695 writebarrierptr_prewrite(dstx, *srcx)
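// For example, for a hypothetical channel element type struct{ p *int; n int; q *int }
// on a 64-bit system, typ.gcdata is the single byte 0x05 (pointer bits for words 0 and 2)
// and typ.ptrdata is 3*sys.PtrSize, so the loop above issues pre-write barriers for the
// slots at offsets 0 and 2*sys.PtrSize and skips the scalar word between them.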
700 // The methods operating on spans all require that h has been returned
701 // by heapBitsForSpan and that size, n, total are the span layout description
702 // returned by the mspan's layout method.
703 // If total > size*n, it means that there is extra leftover memory in the span,
704 // usually due to rounding.
706 // TODO(rsc): Perhaps introduce a different heapBitsSpan type.
708 // initSpan initializes the heap bitmap for a span.
709 // It clears all checkmark bits.
710 // If this is a span of pointer-sized objects, it initializes all
711 // words to pointer/scan.
712 // Otherwise, it initializes all words to scalar/dead.
713 func (h heapBits) initSpan(s *mspan) {
714 size, n, total := s.layout()
716 // Init the markbit structures
717 s.freeindex = 0
718 s.allocCache = ^uint64(0) // all 1s indicating all free.
719 s.nelems = n
720 s.allocBits = nil
721 s.gcmarkBits = nil
722 s.gcmarkBits = newMarkBits(s.nelems)
723 s.allocBits = newAllocBits(s.nelems)
725 // Clear bits corresponding to objects.
726 if total%heapBitmapScale != 0 {
727 throw("initSpan: unaligned length")
729 nbyte := total / heapBitmapScale
730 if sys.PtrSize == 8 && size == sys.PtrSize {
731 end := h.bitp
732 bitp := subtractb(end, nbyte-1)
733 for {
734 *bitp = bitPointerAll | bitScanAll
735 if bitp == end {
736 break
738 bitp = add1(bitp)
740 return
742 memclrNoHeapPointers(unsafe.Pointer(subtractb(h.bitp, nbyte-1)), nbyte)
745 // initCheckmarkSpan initializes a span for being checkmarked.
746 // It clears the checkmark bits, which are set to 1 in normal operation.
747 func (h heapBits) initCheckmarkSpan(size, n, total uintptr) {
748 // The sys.PtrSize == 8 test is a compile-time constant false on 32-bit and eliminates this code entirely.
749 if sys.PtrSize == 8 && size == sys.PtrSize {
750 // Checkmark bit is type bit, bottom bit of every 2-bit entry.
751 // Only possible on 64-bit system, since minimum size is 8.
752 // Must clear type bit (checkmark bit) of every word.
753 // The type bit is the lower of every two-bit pair.
754 bitp := h.bitp
755 for i := uintptr(0); i < n; i += 4 {
756 *bitp &^= bitPointerAll
757 bitp = subtract1(bitp)
759 return
761 for i := uintptr(0); i < n; i++ {
762 *h.bitp &^= bitScan << (heapBitsShift + h.shift)
763 h = h.forward(size / sys.PtrSize)
767 // clearCheckmarkSpan undoes all the checkmarking in a span.
768 // The actual checkmark bits are ignored, so the only work to do
769 // is to fix the pointer bits. (Pointer bits are ignored by scanobject
770 // but consulted by typedmemmove.)
771 func (h heapBits) clearCheckmarkSpan(size, n, total uintptr) {
772 // The sys.PtrSize == 8 test is a compile-time constant false on 32-bit and eliminates this code entirely.
773 if sys.PtrSize == 8 && size == sys.PtrSize {
774 // Checkmark bit is type bit, bottom bit of every 2-bit entry.
775 // Only possible on 64-bit system, since minimum size is 8.
776 // Must clear type bit (checkmark bit) of every word.
777 // The type bit is the lower of every two-bit pair.
778 bitp := h.bitp
779 for i := uintptr(0); i < n; i += 4 {
780 *bitp |= bitPointerAll
781 bitp = subtract1(bitp)
786 // oneBitCount is indexed by byte and produces the
787 // number of 1 bits in that byte. For example 128 has 1 bit set
788 // and oneBitCount[128] will hold 1.
789 var oneBitCount = [256]uint8{
790 0, 1, 1, 2, 1, 2, 2, 3,
791 1, 2, 2, 3, 2, 3, 3, 4,
792 1, 2, 2, 3, 2, 3, 3, 4,
793 2, 3, 3, 4, 3, 4, 4, 5,
794 1, 2, 2, 3, 2, 3, 3, 4,
795 2, 3, 3, 4, 3, 4, 4, 5,
796 2, 3, 3, 4, 3, 4, 4, 5,
797 3, 4, 4, 5, 4, 5, 5, 6,
798 1, 2, 2, 3, 2, 3, 3, 4,
799 2, 3, 3, 4, 3, 4, 4, 5,
800 2, 3, 3, 4, 3, 4, 4, 5,
801 3, 4, 4, 5, 4, 5, 5, 6,
802 2, 3, 3, 4, 3, 4, 4, 5,
803 3, 4, 4, 5, 4, 5, 5, 6,
804 3, 4, 4, 5, 4, 5, 5, 6,
805 4, 5, 5, 6, 5, 6, 6, 7,
806 1, 2, 2, 3, 2, 3, 3, 4,
807 2, 3, 3, 4, 3, 4, 4, 5,
808 2, 3, 3, 4, 3, 4, 4, 5,
809 3, 4, 4, 5, 4, 5, 5, 6,
810 2, 3, 3, 4, 3, 4, 4, 5,
811 3, 4, 4, 5, 4, 5, 5, 6,
812 3, 4, 4, 5, 4, 5, 5, 6,
813 4, 5, 5, 6, 5, 6, 6, 7,
814 2, 3, 3, 4, 3, 4, 4, 5,
815 3, 4, 4, 5, 4, 5, 5, 6,
816 3, 4, 4, 5, 4, 5, 5, 6,
817 4, 5, 5, 6, 5, 6, 6, 7,
818 3, 4, 4, 5, 4, 5, 5, 6,
819 4, 5, 5, 6, 5, 6, 6, 7,
820 4, 5, 5, 6, 5, 6, 6, 7,
821 5, 6, 6, 7, 6, 7, 7, 8}
823 // countAlloc returns the number of objects allocated in span s by
824 // scanning the allocation bitmap.
825 // TODO:(rlh) Use popcount intrinsic.
826 func (s *mspan) countAlloc() int {
827 count := 0
828 maxIndex := s.nelems / 8
829 for i := uintptr(0); i < maxIndex; i++ {
830 mrkBits := *s.gcmarkBits.bytep(i)
831 count += int(oneBitCount[mrkBits])
833 if bitsInLastByte := s.nelems % 8; bitsInLastByte != 0 {
834 mrkBits := *s.gcmarkBits.bytep(maxIndex)
835 mask := uint8((1 << bitsInLastByte) - 1)
836 bits := mrkBits & mask
837 count += int(oneBitCount[bits])
839 return count
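// For example, with s.nelems == 10 and gcmarkBits bytes {0xb1, 0x02}: the full first
// byte contributes oneBitCount[0xb1] == 4, and only the low 10%8 == 2 bits of the
// second byte are counted (0x02 & 0x03 == 0x02, one bit), so countAlloc returns 5.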
842 // heapBitsSetType records that the new allocation [x, x+size)
843 // holds in [x, x+dataSize) one or more values of type typ.
844 // (The number of values is given by dataSize / typ.size.)
845 // If dataSize < size, the fragment [x+dataSize, x+size) is
846 // recorded as non-pointer data.
847 // It is known that the type has pointers somewhere;
848 // malloc does not call heapBitsSetType when there are no pointers,
849 // because all free objects are marked as noscan during
850 // heapBitsSweepSpan.
852 // There can only be one allocation from a given span active at a time,
853 // and the bitmap for a span always falls on byte boundaries,
854 // so there are no write-write races for access to the heap bitmap.
855 // Hence, heapBitsSetType can access the bitmap without atomics.
857 // There can be read-write races between heapBitsSetType and things
858 // that read the heap bitmap like scanobject. However, since
859 // heapBitsSetType is only used for objects that have not yet been
860 // made reachable, readers will ignore bits being modified by this
861 // function. This does mean this function cannot transiently modify
862 // bits that belong to neighboring objects. Also, on weakly-ordered
863 // machines, callers must execute a store/store (publication) barrier
864 // between calling this function and making the object reachable.
865 func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
866 const doubleCheck = false // slow but helpful; enable to test modifications to this code
868 // dataSize is always size rounded up to the next malloc size class,
869 // except in the case of allocating a defer block, in which case
870 // size is sizeof(_defer{}) (at least 6 words) and dataSize may be
871 // arbitrarily larger.
873 // The checks for size == sys.PtrSize and size == 2*sys.PtrSize can therefore
874 // assume that dataSize == size without checking it explicitly.
876 if sys.PtrSize == 8 && size == sys.PtrSize {
877 // It's one word and it has pointers, so it must be a pointer.
878 // Since all allocated one-word objects are pointers
879 // (non-pointers are aggregated into tinySize allocations),
880 // initSpan sets the pointer bits for us. Nothing to do here.
881 if doubleCheck {
882 h := heapBitsForAddr(x)
883 if !h.isPointer() {
884 throw("heapBitsSetType: pointer bit missing")
886 if !h.morePointers() {
887 throw("heapBitsSetType: scan bit missing")
890 return
893 h := heapBitsForAddr(x)
894 ptrmask := typ.gcdata // start of 1-bit pointer mask (or GC program, handled below)
896 // Heap bitmap bits for a 2-word object are only 4 bits (half a byte),
897 // so they are shared with the bits of a neighboring object.
898 // This is called out as a special case primarily for 32-bit systems,
899 // so that on 32-bit systems the code below can assume all objects
900 // are 4-word aligned (because they're all 16-byte aligned).
901 if size == 2*sys.PtrSize {
902 if typ.size == sys.PtrSize {
903 // We're allocating a block big enough to hold two pointers.
904 // On 64-bit, that means the actual object must be two pointers,
905 // or else we'd have used the one-pointer-sized block.
906 // On 32-bit, however, this is the 8-byte block, the smallest one.
907 // So it could be that we're allocating one pointer and this was
908 // just the smallest block available. Distinguish by checking dataSize.
909 // (In general the number of instances of typ being allocated is
910 // dataSize/typ.size.)
911 if sys.PtrSize == 4 && dataSize == sys.PtrSize {
912 // 1 pointer object. On 32-bit machines clear the bit for the
913 // unused second word.
914 *h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
915 *h.bitp |= (bitPointer | bitScan) << h.shift
916 } else {
917 // 2-element slice of pointer.
918 *h.bitp |= (bitPointer | bitScan | bitPointer<<heapBitsShift) << h.shift
920 return
922 // Otherwise typ.size must be 2*sys.PtrSize,
923 // and typ.kind&kindGCProg == 0.
924 if doubleCheck {
925 if typ.size != 2*sys.PtrSize || typ.kind&kindGCProg != 0 {
926 print("runtime: heapBitsSetType size=", size, " but typ.size=", typ.size, " gcprog=", typ.kind&kindGCProg != 0, "\n")
927 throw("heapBitsSetType")
930 b := uint32(*ptrmask)
931 hb := (b & 3) | bitScan
932 // bitPointer == 1, bitScan is 1 << 4, heapBitsShift is 1.
933 // 110011 is shifted h.shift and complemented.
934 // This clears out the bits that are about to be
935 // ored into *h.bitp in the next instructions.
936 *h.bitp &^= (bitPointer | bitScan | ((bitPointer | bitScan) << heapBitsShift)) << h.shift
937 *h.bitp |= uint8(hb << h.shift)
938 return
941 // Copy from 1-bit ptrmask into 2-bit bitmap.
942 // The basic approach is to use a single uintptr as a bit buffer,
943 // alternating between reloading the buffer and writing bitmap bytes.
944 // In general, one load can supply two bitmap byte writes.
945 // This is a lot of lines of code, but it compiles into relatively few
946 // machine instructions.
948 var (
949 // Ptrmask input.
950 p *byte // last ptrmask byte read
951 b uintptr // ptrmask bits already loaded
952 nb uintptr // number of bits in b at next read
953 endp *byte // final ptrmask byte to read (then repeat)
954 endnb uintptr // number of valid bits in *endp
955 pbits uintptr // alternate source of bits
957 // Heap bitmap output.
958 w uintptr // words processed
959 nw uintptr // number of words to process
960 hbitp *byte // next heap bitmap byte to write
961 hb uintptr // bits being prepared for *hbitp
964 hbitp = h.bitp
966 // Handle GC program. Delayed until this part of the code
967 // so that we can use the same double-checking mechanism
968 // as the 1-bit case. Nothing above could have encountered
969 // GC programs: the cases were all too small.
970 if typ.kind&kindGCProg != 0 {
971 heapBitsSetTypeGCProg(h, typ.ptrdata, typ.size, dataSize, size, addb(typ.gcdata, 4))
972 if doubleCheck {
973 // Double-check the heap bits written by GC program
974 // by running the GC program to create a 1-bit pointer mask
975 // and then jumping to the double-check code below.
976 // This doesn't catch bugs shared between the 1-bit and 4-bit
977 // GC program execution, but it does catch mistakes specific
978 // to just one of those and bugs in heapBitsSetTypeGCProg's
979 // implementation of arrays.
980 lock(&debugPtrmask.lock)
981 if debugPtrmask.data == nil {
982 debugPtrmask.data = (*byte)(persistentalloc(1<<20, 1, &memstats.other_sys))
984 ptrmask = debugPtrmask.data
985 runGCProg(addb(typ.gcdata, 4), nil, ptrmask, 1)
986 goto Phase4
988 return
991 // Note about sizes:
993 // typ.size is the size of the object in bytes,
994 // and typ.ptrdata is the size in bytes of the prefix
995 // of the object that contains pointers. That is, the final
996 // typ.size - typ.ptrdata bytes contain no pointers.
997 // This allows optimization of a common pattern where
998 // an object has a small header followed by a large scalar
999 // buffer. If we know the pointers are over, we don't have
1000 // to scan the buffer's heap bitmap at all.
1001 // The 1-bit ptrmasks are sized to contain only bits for
1002 // the typ.ptrdata prefix, zero padded out to a full byte
1003 // of bitmap. This code sets nw (below) so that heap bitmap
1004 // bits are only written for the typ.ptrdata prefix; if there is
1005 // more room in the allocated object, the next heap bitmap
1006 // entry is a 00, indicating that there are no more pointers
1007 // to scan. So only the ptrmask for the ptrdata bytes is needed.
1009 // Replicated copies are not as nice: if there is an array of
1010 // objects with scalar tails, all but the last tail does have to
1011 // be initialized, because there is no way to say "skip forward".
1012 // However, because of the possibility of a repeated type with
1013 // size not a multiple of 4 pointers (one heap bitmap byte),
1014 // the code already must handle the last ptrmask byte specially
1015 // by treating it as containing only the bits for endnb pointers,
1016 // where endnb <= 4. We represent large scalar tails that must
1017 // be expanded in the replication by setting endnb larger than 4.
1018 // This will have the effect of reading many bits out of b,
1019 // but once the real bits are shifted out, b will supply as many
1020 // zero bits as we try to read, which is exactly what we need.
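// For example, an element type with typ.ptrdata/sys.PtrSize == 3 and ptrmask bits
// 1 0 1 but typ.size/sys.PtrSize == 5 is treated as the 5-bit pattern 1 0 1 0 0;
// replicating that pattern through pbits and truncating to a multiple of 5 bits lets
// Phase 2 below emit element after element without consulting the ptrmask again.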
1022 p = ptrmask
1023 if typ.size < dataSize {
1024 // Filling in bits for an array of typ.
1025 // Set up for repetition of ptrmask during main loop.
1026 // Note that ptrmask describes only a prefix of the element: its typ.ptrdata bytes.
1027 const maxBits = sys.PtrSize*8 - 7
1028 if typ.ptrdata/sys.PtrSize <= maxBits {
1029 // Entire ptrmask fits in uintptr with room for a byte fragment.
1030 // Load into pbits and never read from ptrmask again.
1031 // This is especially important when the ptrmask has
1032 // fewer than 8 bits in it; otherwise the reload in the middle
1033 // of the Phase 2 loop would itself need to loop to gather
1034 // at least 8 bits.
1036 // Accumulate ptrmask into b.
1037 // ptrmask is sized to describe only typ.ptrdata, but we record
1038 // it as describing typ.size bytes, since all the high bits are zero.
1039 nb = typ.ptrdata / sys.PtrSize
1040 for i := uintptr(0); i < nb; i += 8 {
1041 b |= uintptr(*p) << i
1042 p = add1(p)
1044 nb = typ.size / sys.PtrSize
1046 // Replicate ptrmask to fill entire pbits uintptr.
1047 // Doubling and truncating is fewer steps than
1048 // iterating by nb each time. (nb could be 1.)
1049 // Since we loaded typ.ptrdata/sys.PtrSize bits
1050 // but are pretending to have typ.size/sys.PtrSize,
1051 // there might be no replication necessary/possible.
1052 pbits = b
1053 endnb = nb
1054 if nb+nb <= maxBits {
1055 for endnb <= sys.PtrSize*8 {
1056 pbits |= pbits << endnb
1057 endnb += endnb
1059 // Truncate to a multiple of original ptrmask.
1060 // Because nb+nb <= maxBits, nb fits in a byte.
1061 // Byte division is cheaper than uintptr division.
1062 endnb = uintptr(maxBits/byte(nb)) * nb
1063 pbits &= 1<<endnb - 1
1064 b = pbits
1065 nb = endnb
1068 // Clear p and endp as sentinel for using pbits.
1069 // Checked during Phase 2 loop.
1070 p = nil
1071 endp = nil
1072 } else {
1073 // Ptrmask is larger. Read it multiple times.
1074 n := (typ.ptrdata/sys.PtrSize+7)/8 - 1
1075 endp = addb(ptrmask, n)
1076 endnb = typ.size/sys.PtrSize - n*8
1079 if p != nil {
1080 b = uintptr(*p)
1081 p = add1(p)
1082 nb = 8
1085 if typ.size == dataSize {
1086 // Single entry: can stop once we reach the non-pointer data.
1087 nw = typ.ptrdata / sys.PtrSize
1088 } else {
1089 // Repeated instances of typ in an array.
1090 // Have to process first N-1 entries in full, but can stop
1091 // once we reach the non-pointer data in the final entry.
1092 nw = ((dataSize/typ.size-1)*typ.size + typ.ptrdata) / sys.PtrSize
1094 if nw == 0 {
1095 // No pointers! Caller was supposed to check.
1096 println("runtime: invalid type ", *typ.string)
1097 throw("heapBitsSetType: called with non-pointer type")
1098 return
1100 if nw < 2 {
1101 // Must write at least 2 words, because the "no scan"
1102 // encoding doesn't take effect until the third word.
1103 nw = 2
1106 // Phase 1: Special case for leading byte (shift==0) or half-byte (shift==4).
1107 // The leading byte is special because it contains the bits for word 1,
1108 // which does not have the scan bit set.
1109 // The leading half-byte is special because it's a half a byte,
1110 // so we have to be careful with the bits already there.
1111 switch {
1112 default:
1113 throw("heapBitsSetType: unexpected shift")
1115 case h.shift == 0:
1116 // Ptrmask and heap bitmap are aligned.
1117 // Handle first byte of bitmap specially.
1119 // The first byte we write out covers the first four
1120 // words of the object. The scan/dead bit on the first
1121 // word must be set to scan since there are pointers
1122 // somewhere in the object. The scan/dead bit on the
1123 // second word is the checkmark, so we don't set it.
1124 // In all following words, we set the scan/dead
1125 // appropriately to indicate that the object continues
1126 // to the next 2-bit entry in the bitmap.
1128 // TODO: It doesn't matter if we set the checkmark, so
1129 // maybe this case isn't needed any more.
1130 hb = b & bitPointerAll
1131 hb |= bitScan | bitScan<<(2*heapBitsShift) | bitScan<<(3*heapBitsShift)
1132 if w += 4; w >= nw {
1133 goto Phase3
1135 *hbitp = uint8(hb)
1136 hbitp = subtract1(hbitp)
1137 b >>= 4
1138 nb -= 4
1140 case sys.PtrSize == 8 && h.shift == 2:
1141 // Ptrmask and heap bitmap are misaligned.
1142 // The bits for the first two words are in a byte shared
1143 // with another object, so we must be careful with the bits
1144 // already there.
1145 // We took care of 1-word and 2-word objects above,
1146 // so this is at least a 6-word object.
1147 hb = (b & (bitPointer | bitPointer<<heapBitsShift)) << (2 * heapBitsShift)
1148 // This is not noscan, so set the scan bit in the
1149 // first word.
1150 hb |= bitScan << (2 * heapBitsShift)
1151 b >>= 2
1152 nb -= 2
1153 // Note: no bitScan for second word because that's
1154 // the checkmark.
1155 *hbitp &^= uint8((bitPointer | bitScan | (bitPointer << heapBitsShift)) << (2 * heapBitsShift))
1156 *hbitp |= uint8(hb)
1157 hbitp = subtract1(hbitp)
1158 if w += 2; w >= nw {
1159 // We know that there is more data, because we handled 2-word objects above.
1160 // This must be at least a 6-word object. If we're out of pointer words,
1161 // mark no scan in next bitmap byte and finish.
1162 hb = 0
1163 w += 4
1164 goto Phase3
1168 // Phase 2: Full bytes in bitmap, up to but not including write to last byte (full or partial) in bitmap.
1169 // The loop computes the bits for that last write but does not execute the write;
1170 // it leaves the bits in hb for processing by phase 3.
1171 // To avoid repeated adjustment of nb, we subtract out the 4 bits we're going to
1172 // use in the first half of the loop right now, and then we only adjust nb explicitly
1173 // if the 8 bits used by each iteration isn't balanced by 8 bits loaded mid-loop.
1174 nb -= 4
1175 for {
1176 // Emit bitmap byte.
1177 // b has at least nb+4 bits, with one exception:
1178 // if w+4 >= nw, then b has only nw-w bits,
1179 // but we'll stop at the break and then truncate
1180 // appropriately in Phase 3.
1181 hb = b & bitPointerAll
1182 hb |= bitScanAll
1183 if w += 4; w >= nw {
1184 break
1186 *hbitp = uint8(hb)
1187 hbitp = subtract1(hbitp)
1188 b >>= 4
1190 // Load more bits. b has nb right now.
1191 if p != endp {
1192 // Fast path: keep reading from ptrmask.
1193 // nb unmodified: we just loaded 8 bits,
1194 // and the next iteration will consume 8 bits,
1195 // leaving us with the same nb the next time we're here.
1196 if nb < 8 {
1197 b |= uintptr(*p) << nb
1198 p = add1(p)
1199 } else {
1200 // Reduce the number of bits in b.
1201 // This is important if we skipped
1202 // over a scalar tail, since nb could
1203 // be larger than the bit width of b.
1204 nb -= 8
1206 } else if p == nil {
1207 // Almost as fast path: track bit count and refill from pbits.
1208 // For short repetitions.
1209 if nb < 8 {
1210 b |= pbits << nb
1211 nb += endnb
1213 nb -= 8 // for next iteration
1214 } else {
1215 // Slow path: reached end of ptrmask.
1216 // Process final partial byte and rewind to start.
1217 b |= uintptr(*p) << nb
1218 nb += endnb
1219 if nb < 8 {
1220 b |= uintptr(*ptrmask) << nb
1221 p = add1(ptrmask)
1222 } else {
1223 nb -= 8
1224 p = ptrmask
1228 // Emit bitmap byte.
1229 hb = b & bitPointerAll
1230 hb |= bitScanAll
1231 if w += 4; w >= nw {
1232 break
1234 *hbitp = uint8(hb)
1235 hbitp = subtract1(hbitp)
1236 b >>= 4
1239 Phase3:
1240 // Phase 3: Write last byte or partial byte and zero the rest of the bitmap entries.
1241 if w > nw {
1242 // Counting the 4 entries in hb not yet written to memory,
1243 // there are more entries than possible pointer slots.
1244 // Discard the excess entries (can't be more than 3).
1245 mask := uintptr(1)<<(4-(w-nw)) - 1
1246 hb &= mask | mask<<4 // apply mask to both pointer bits and scan bits
1249 // Change nw from counting possibly-pointer words to total words in allocation.
1250 nw = size / sys.PtrSize
1252 // Write whole bitmap bytes.
1253 // The first is hb, the rest are zero.
1254 if w <= nw {
1255 *hbitp = uint8(hb)
1256 hbitp = subtract1(hbitp)
1257 hb = 0 // for possible final half-byte below
1258 for w += 4; w <= nw; w += 4 {
1259 *hbitp = 0
1260 hbitp = subtract1(hbitp)
1264 // Write final partial bitmap byte if any.
1265 // We know w > nw, or else we'd still be in the loop above.
1266 // It can be bigger only due to the 4 entries in hb that it counts.
1267 // If w == nw+4 then there's nothing left to do: we wrote all nw entries
1268 // and can discard the 4 sitting in hb.
1269 // But if w == nw+2, we need to write first two in hb.
1270 // The byte is shared with the next object, so be careful with
1271 // existing bits.
1272 if w == nw+2 {
1273 *hbitp = *hbitp&^(bitPointer|bitScan|(bitPointer|bitScan)<<heapBitsShift) | uint8(hb)
1276 Phase4:
1277 // Phase 4: all done, but perhaps double check.
1278 if doubleCheck {
1279 end := heapBitsForAddr(x + size)
1280 if typ.kind&kindGCProg == 0 && (hbitp != end.bitp || (w == nw+2) != (end.shift == 2)) {
1281 println("ended at wrong bitmap byte for", *typ.string, "x", dataSize/typ.size)
1282 print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
1283 print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
1284 h0 := heapBitsForAddr(x)
1285 print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
1286 print("ended at hbitp=", hbitp, " but next starts at bitp=", end.bitp, " shift=", end.shift, "\n")
1287 throw("bad heapBitsSetType")
1290 // Double-check that bits to be written were written correctly.
1291 // Does not check that other bits were not written, unfortunately.
1292 h := heapBitsForAddr(x)
1293 nptr := typ.ptrdata / sys.PtrSize
1294 ndata := typ.size / sys.PtrSize
1295 count := dataSize / typ.size
1296 totalptr := ((count-1)*typ.size + typ.ptrdata) / sys.PtrSize
1297 for i := uintptr(0); i < size/sys.PtrSize; i++ {
1298 j := i % ndata
1299 var have, want uint8
1300 have = (*h.bitp >> h.shift) & (bitPointer | bitScan)
1301 if i >= totalptr {
1302 want = 0 // deadmarker
1303 if typ.kind&kindGCProg != 0 && i < (totalptr+3)/4*4 {
1304 want = bitScan
1306 } else {
1307 if j < nptr && (*addb(ptrmask, j/8)>>(j%8))&1 != 0 {
1308 want |= bitPointer
1310 if i != 1 {
1311 want |= bitScan
1312 } else {
1313 have &^= bitScan
1316 if have != want {
1317 println("mismatch writing bits for", *typ.string, "x", dataSize/typ.size)
1318 print("typ.size=", typ.size, " typ.ptrdata=", typ.ptrdata, " dataSize=", dataSize, " size=", size, "\n")
1319 print("kindGCProg=", typ.kind&kindGCProg != 0, "\n")
1320 print("w=", w, " nw=", nw, " b=", hex(b), " nb=", nb, " hb=", hex(hb), "\n")
1321 h0 := heapBitsForAddr(x)
1322 print("initial bits h0.bitp=", h0.bitp, " h0.shift=", h0.shift, "\n")
1323 print("current bits h.bitp=", h.bitp, " h.shift=", h.shift, " *h.bitp=", hex(*h.bitp), "\n")
1324 print("ptrmask=", ptrmask, " p=", p, " endp=", endp, " endnb=", endnb, " pbits=", hex(pbits), " b=", hex(b), " nb=", nb, "\n")
1325 println("at word", i, "offset", i*sys.PtrSize, "have", have, "want", want)
1326 if typ.kind&kindGCProg != 0 {
1327 println("GC program:")
1328 dumpGCProg(addb(typ.gcdata, 4))
1330 throw("bad heapBitsSetType")
1332 h = h.next()
1334 if ptrmask == debugPtrmask.data {
1335 unlock(&debugPtrmask.lock)
1340 var debugPtrmask struct {
1341 lock mutex
1342 data *byte
1345 // heapBitsSetTypeGCProg implements heapBitsSetType using a GC program.
1346 // progSize is the size of the memory described by the program.
1347 // elemSize is the size of the element that the GC program describes (a prefix of).
1348 // dataSize is the total size of the intended data, a multiple of elemSize.
1349 // allocSize is the total size of the allocated memory.
1351 // GC programs are only used for large allocations.
1352 // heapBitsSetType requires that allocSize is a multiple of 4 words,
1353 // so that the relevant bitmap bytes are not shared with surrounding
1354 // objects.
1355 func heapBitsSetTypeGCProg(h heapBits, progSize, elemSize, dataSize, allocSize uintptr, prog *byte) {
1356 if sys.PtrSize == 8 && allocSize%(4*sys.PtrSize) != 0 {
1357 // Alignment will be wrong.
1358 throw("heapBitsSetTypeGCProg: small allocation")
1360 var totalBits uintptr
1361 if elemSize == dataSize {
1362 totalBits = runGCProg(prog, nil, h.bitp, 2)
1363 if totalBits*sys.PtrSize != progSize {
1364 println("runtime: heapBitsSetTypeGCProg: total bits", totalBits, "but progSize", progSize)
1365 throw("heapBitsSetTypeGCProg: unexpected bit count")
1367 } else {
1368 count := dataSize / elemSize
1370 // Piece together program trailer to run after prog that does:
1371 // literal(0)
1372 // repeat(1, elemSize-progSize-1) // zeros to fill element size
1373 // repeat(elemSize, count-1) // repeat that element for count
1374 // This zero-pads the data remaining in the first element and then
1375 // repeats that first element to fill the array.
1376 var trailer [40]byte // 3 varints (max 10 each) + some bytes
1377 i := 0
1378 if n := elemSize/sys.PtrSize - progSize/sys.PtrSize; n > 0 {
1379 // literal(0)
1380 trailer[i] = 0x01
1382 trailer[i] = 0
1384 if n > 1 {
1385 // repeat(1, n-1)
1386 trailer[i] = 0x81
1389 for ; n >= 0x80; n >>= 7 {
1390 trailer[i] = byte(n | 0x80)
1393 trailer[i] = byte(n)
1397 // repeat(elemSize/ptrSize, count-1)
1398 trailer[i] = 0x80
1400 n := elemSize / sys.PtrSize
1401 for ; n >= 0x80; n >>= 7 {
1402 trailer[i] = byte(n | 0x80)
1405 trailer[i] = byte(n)
1407 n = count - 1
1408 for ; n >= 0x80; n >>= 7 {
1409 trailer[i] = byte(n | 0x80)
1412 trailer[i] = byte(n)
1414 trailer[i] = 0
1417 runGCProg(prog, &trailer[0], h.bitp, 2)
1419 // Even though we filled in the full array just now,
1420 // record that we only filled in up to the ptrdata of the
1421 // last element. This will cause the code below to
1422 // memclr the dead section of the final array element,
1423 // so that scanobject can stop early in the final element.
1424 totalBits = (elemSize*(count-1) + progSize) / sys.PtrSize
1426 endProg := unsafe.Pointer(subtractb(h.bitp, (totalBits+3)/4))
1427 endAlloc := unsafe.Pointer(subtractb(h.bitp, allocSize/heapBitmapScale))
1428 memclrNoHeapPointers(add(endAlloc, 1), uintptr(endProg)-uintptr(endAlloc))
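// As a worked example of the trailer above: with elemSize == 64 bytes, progSize == 16
// bytes and count == 3 on a 64-bit system, n == 8-2 == 6 and the trailer bytes are
// 01 00 (literal(0)), 81 05 (repeat(1, 5): five more zero words to pad the element),
// 80 08 02 (repeat(8, 2): repeat the 8-word element twice more), and 00 (stop).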
1431 // progToPointerMask returns the 1-bit pointer mask output by the GC program prog.
1432 // size is the size of the region described by prog, in bytes.
1433 // The resulting bitvector will have no more than size/sys.PtrSize bits.
1434 func progToPointerMask(prog *byte, size uintptr) bitvector {
1435 n := (size/sys.PtrSize + 7) / 8
1436 x := (*[1 << 30]byte)(persistentalloc(n+1, 1, &memstats.buckhash_sys))[:n+1]
1437 x[len(x)-1] = 0xa1 // overflow check sentinel
1438 n = runGCProg(prog, nil, &x[0], 1)
1439 if x[len(x)-1] != 0xa1 {
1440 throw("progToPointerMask: overflow")
1442 return bitvector{int32(n), &x[0]}
1445 // Packed GC pointer bitmaps, aka GC programs.
1447 // For large types containing arrays, the type information has a
1448 // natural repetition that can be encoded to save space in the
1449 // binary and in the memory representation of the type information.
1451 // The encoding is a simple Lempel-Ziv style bytecode machine
1452 // with the following instructions:
1454 // 00000000: stop
1455 // 0nnnnnnn: emit n bits copied from the next (n+7)/8 bytes
1456 // 10000000 n c: repeat the previous n bits c times; n, c are varints
1457 // 1nnnnnnn c: repeat the previous n bits c times; c is a varint
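//
// For example, the 3-word pointer mask 1 0 1 is encoded literally as the program
// 03 05 00: opcode 0x03 emits 3 bits taken from the next byte (0x05, low bit first)
// and 0x00 stops. Varint operands are encoded 7 bits at a time, low group first,
// with the high bit of each byte marking continuation, so a repeat count of 133 is
// the two bytes 85 01.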
1459 // runGCProg executes the GC program prog, and then trailer if non-nil,
1460 // writing to dst with entries of the given size.
1461 // If size == 1, dst is a 1-bit pointer mask laid out moving forward from dst.
1462 // If size == 2, dst is the 2-bit heap bitmap, and writes move backward
1463 // starting at dst (because the heap bitmap does). In this case, the caller guarantees
1464 // that only whole bytes in dst need to be written.
1466 // runGCProg returns the number of 1- or 2-bit entries written to memory.
1467 func runGCProg(prog, trailer, dst *byte, size int) uintptr {
1468 dstStart := dst
1470 // Bits waiting to be written to memory.
1471 var bits uintptr
1472 var nbits uintptr
1474 p := prog
1475 Run:
1476 for {
1477 // Flush accumulated full bytes.
1478 // The rest of the loop assumes that nbits <= 7.
1479 for ; nbits >= 8; nbits -= 8 {
1480 if size == 1 {
1481 *dst = uint8(bits)
1482 dst = add1(dst)
1483 bits >>= 8
1484 } else {
1485 v := bits&bitPointerAll | bitScanAll
1486 *dst = uint8(v)
1487 dst = subtract1(dst)
1488 bits >>= 4
1489 v = bits&bitPointerAll | bitScanAll
1490 *dst = uint8(v)
1491 dst = subtract1(dst)
1492 bits >>= 4
1496 // Process one instruction.
1497 inst := uintptr(*p)
1498 p = add1(p)
1499 n := inst & 0x7F
1500 if inst&0x80 == 0 {
1501 // Literal bits; n == 0 means end of program.
1502 if n == 0 {
1503 // Program is over; continue in trailer if present.
1504 if trailer != nil {
1505 //println("trailer")
1506 p = trailer
1507 trailer = nil
1508 continue
1510 //println("done")
1511 break Run
1513 //println("lit", n, dst)
1514 nbyte := n / 8
1515 for i := uintptr(0); i < nbyte; i++ {
1516 bits |= uintptr(*p) << nbits
1517 p = add1(p)
1518 if size == 1 {
1519 *dst = uint8(bits)
1520 dst = add1(dst)
1521 bits >>= 8
1522 } else {
1523 v := bits&0xf | bitScanAll
1524 *dst = uint8(v)
1525 dst = subtract1(dst)
1526 bits >>= 4
1527 v = bits&0xf | bitScanAll
1528 *dst = uint8(v)
1529 dst = subtract1(dst)
1530 bits >>= 4
1533 if n %= 8; n > 0 {
1534 bits |= uintptr(*p) << nbits
1535 p = add1(p)
1536 nbits += n
1538 continue Run
1541 // Repeat. If n == 0, it is encoded in a varint in the next bytes.
1542 if n == 0 {
1543 for off := uint(0); ; off += 7 {
1544 x := uintptr(*p)
1545 p = add1(p)
1546 n |= (x & 0x7F) << off
1547 if x&0x80 == 0 {
1548 break
1553 // Count is encoded in a varint in the next bytes.
1554 c := uintptr(0)
1555 for off := uint(0); ; off += 7 {
1556 x := uintptr(*p)
1557 p = add1(p)
1558 c |= (x & 0x7F) << off
1559 if x&0x80 == 0 {
1560 break
1561 }
1562 }
1563 c *= n // now total number of bits to copy
1565 // If the number of bits being repeated is small, load them
1566 // into a register and use that register for the entire loop
1567 // instead of repeatedly reading from memory.
1568 // Handling fewer than 8 bits here makes the general loop simpler.
1569 // The cutoff is sys.PtrSize*8 - 7 to guarantee that when we add
1570 // the pattern to a bit buffer holding at most 7 bits (a partial byte)
1571 // it will not overflow.
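// (Illustrative, added for exposition: on a 64-bit system maxBits is 57, and
// 57 pattern bits plus a 7-bit partial byte exactly fill the 64-bit buffer.)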
1572 src := dst
1573 const maxBits = sys.PtrSize*8 - 7
1574 if n <= maxBits {
1575 // Start with bits in output buffer.
1576 pattern := bits
1577 npattern := nbits
1579 // If we need more bits, fetch them from memory.
1580 if size == 1 {
1581 src = subtract1(src)
1582 for npattern < n {
1583 pattern <<= 8
1584 pattern |= uintptr(*src)
1585 src = subtract1(src)
1586 npattern += 8
1587 }
1588 } else {
1589 src = add1(src)
1590 for npattern < n {
1591 pattern <<= 4
1592 pattern |= uintptr(*src) & 0xf
1593 src = add1(src)
1594 npattern += 4
1595 }
1596 }

1598 // We started with the whole bit output buffer,
1599 // and then we loaded bits from whole bytes.
1600 // Either way, we might now have too many instead of too few.
1601 // Discard the extra.
1602 if npattern > n {
1603 pattern >>= npattern - n
1604 npattern = n
1605 }

1607 // Replicate pattern to at most maxBits.
1608 if npattern == 1 {
1609 // One bit being repeated.
1610 // If the bit is 1, make the pattern all 1s.
1611 // If the bit is 0, the pattern is already all 0s,
1612 // but we can claim that the number of bits
1613 // in the word is equal to the number we need (c),
1614 // because right shift of bits will zero fill.
1615 if pattern == 1 {
1616 pattern = 1<<maxBits - 1
1617 npattern = maxBits
1618 } else {
1619 npattern = c
1620 }
1621 } else {
1622 b := pattern
1623 nb := npattern
1624 if nb+nb <= maxBits {
1625 // Double pattern until the whole uintptr is filled.
1626 for nb <= sys.PtrSize*8 {
1627 b |= b << nb
1628 nb += nb
1629 }
1630 // Trim away incomplete copy of original pattern in high bits.
1631 // TODO(rsc): Replace with table lookup or loop on systems without divide?
1632 nb = maxBits / npattern * npattern
1633 b &= 1<<nb - 1
1634 pattern = b
1635 npattern = nb
1636 }
1637 }

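// (Illustrative, added for exposition: with pattern = 0b101 and npattern = 3
// on a 64-bit system, the doubling loop exits with nb = 96, which is then
// trimmed to 57/3*3 = 57, i.e. 19 back-to-back copies of the 3-bit pattern.)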
1639 // Add pattern to bit buffer and flush bit buffer, c/npattern times.
1640 // Since pattern contains >8 bits, there will be full bytes to flush
1641 // on each iteration.
1642 for ; c >= npattern; c -= npattern {
1643 bits |= pattern << nbits
1644 nbits += npattern
1645 if size == 1 {
1646 for nbits >= 8 {
1647 *dst = uint8(bits)
1648 dst = add1(dst)
1649 bits >>= 8
1650 nbits -= 8
1651 }
1652 } else {
1653 for nbits >= 4 {
1654 *dst = uint8(bits&0xf | bitScanAll)
1655 dst = subtract1(dst)
1656 bits >>= 4
1657 nbits -= 4
1658 }
1659 }
1660 }

1662 // Add final fragment to bit buffer.
1663 if c > 0 {
1664 pattern &= 1<<c - 1
1665 bits |= pattern << nbits
1666 nbits += c
1667 }
1668 continue Run
1669 }

1671 // Repeat; n too large to fit in a register.
1672 // Since nbits <= 7, we know the first few bytes of repeated data
1673 // are already written to memory.
1674 off := n - nbits // n > nbits because n > maxBits and nbits <= 7
1675 if size == 1 {
1676 // Leading src fragment.
1677 src = subtractb(src, (off+7)/8)
1678 if frag := off & 7; frag != 0 {
1679 bits |= uintptr(*src) >> (8 - frag) << nbits
1680 src = add1(src)
1681 nbits += frag
1682 c -= frag
1683 }
1684 // Main loop: load one byte, write another.
1685 // The bits are rotating through the bit buffer.
1686 for i := c / 8; i > 0; i-- {
1687 bits |= uintptr(*src) << nbits
1688 src = add1(src)
1689 *dst = uint8(bits)
1690 dst = add1(dst)
1691 bits >>= 8
1692 }
1693 // Final src fragment.
1694 if c %= 8; c > 0 {
1695 bits |= (uintptr(*src) & (1<<c - 1)) << nbits
1696 nbits += c
1697 }
1698 } else {
1699 // Leading src fragment.
1700 src = addb(src, (off+3)/4)
1701 if frag := off & 3; frag != 0 {
1702 bits |= (uintptr(*src) & 0xf) >> (4 - frag) << nbits
1703 src = subtract1(src)
1704 nbits += frag
1705 c -= frag
1706 }
1707 // Main loop: load one byte, write another.
1708 // The bits are rotating through the bit buffer.
1709 for i := c / 4; i > 0; i-- {
1710 bits |= (uintptr(*src) & 0xf) << nbits
1711 src = subtract1(src)
1712 *dst = uint8(bits&0xf | bitScanAll)
1713 dst = subtract1(dst)
1714 bits >>= 4
1715 }
1716 // Final src fragment.
1717 if c %= 4; c > 0 {
1718 bits |= (uintptr(*src) & (1<<c - 1)) << nbits
1719 nbits += c
1720 }
1721 }
1722 }

1724 // Write any final bits out, using full-byte writes, even for the final byte.
1725 var totalBits uintptr
1726 if size == 1 {
1727 totalBits = (uintptr(unsafe.Pointer(dst))-uintptr(unsafe.Pointer(dstStart)))*8 + nbits
1728 nbits += -nbits & 7
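// (Illustrative, added for exposition: -nbits & 7 rounds nbits up to a whole
// byte, e.g. nbits = 3 becomes 3 + 5 = 8.)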
1729 for ; nbits > 0; nbits -= 8 {
1730 *dst = uint8(bits)
1731 dst = add1(dst)
1732 bits >>= 8
1733 }
1734 } else {
1735 totalBits = (uintptr(unsafe.Pointer(dstStart))-uintptr(unsafe.Pointer(dst)))*4 + nbits
1736 nbits += -nbits & 3
1737 for ; nbits > 0; nbits -= 4 {
1738 v := bits&0xf | bitScanAll
1739 *dst = uint8(v)
1740 dst = subtract1(dst)
1741 bits >>= 4
1742 }
1743 }
1744 return totalBits
1745 }

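// Worked example (added for exposition, not part of the original source):
// feeding the illustrative [100]*byte program above (0x01 0x01 0x81 0x63 0x00)
// through runGCProg with size == 1 writes twelve 0xff bytes followed by a
// final 0x0f byte, and returns 100.
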
1747 func dumpGCProg(p *byte) {
1748 nptr := 0
1749 for {
1750 x := *p
1751 p = add1(p)
1752 if x == 0 {
1753 print("\t", nptr, " end\n")
1754 break
1755 }
1756 if x&0x80 == 0 {
1757 print("\t", nptr, " lit ", x, ":")
1758 n := int(x+7) / 8
1759 for i := 0; i < n; i++ {
1760 print(" ", hex(*p))
1761 p = add1(p)
1762 }
1763 print("\n")
1764 nptr += int(x)
1765 } else {
1766 nbit := int(x &^ 0x80)
1767 if nbit == 0 {
1768 for nb := uint(0); ; nb += 7 {
1769 x := *p
1770 p = add1(p)
1771 nbit |= int(x&0x7f) << nb
1772 if x&0x80 == 0 {
1773 break
1774 }
1775 }
1776 }
1777 count := 0
1778 for nb := uint(0); ; nb += 7 {
1779 x := *p
1780 p = add1(p)
1781 count |= int(x&0x7f) << nb
1782 if x&0x80 == 0 {
1783 break
1784 }
1785 }
1786 print("\t", nptr, " repeat ", nbit, " × ", count, "\n")
1787 nptr += nbit * count
1788 }
1789 }
1790 }

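// For the illustrative program above (0x01 0x01 0x81 0x63 0x00), dumpGCProg
// would print roughly the following (added for exposition):
//
//  0 lit 1: 0x1
//  1 repeat 1 × 99
//  100 end
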
1792 // Testing.
1794 // gcbits returns the GC type info for x, for testing.
1795 // The result is the bitmap entries (0 or 1), one entry per byte.
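// For example (illustrative, 64-bit, added for exposition): for
// x = new(struct{ p *byte; n uintptr }) the heap mask is [1 0], and the
// trailing non-pointer entry beyond ptrdata is trimmed, giving [1].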
1796 //go:linkname reflect_gcbits reflect.gcbits
1797 func reflect_gcbits(x interface{}) []byte {
1798 ret := getgcmask(x)
1799 typ := (*ptrtype)(unsafe.Pointer(efaceOf(&x)._type)).elem
1800 nptr := typ.ptrdata / sys.PtrSize
1801 for uintptr(len(ret)) > nptr && ret[len(ret)-1] == 0 {
1802 ret = ret[:len(ret)-1]
1803 }
1804 return ret
1805 }

1807 // Returns GC type info for object p for testing.
1808 func getgcmask(ep interface{}) (mask []byte) {
1809 e := *efaceOf(&ep)
1810 p := e.data
1811 t := e._type
1812 // data or bss
1813 roots := gcRoots
1814 for roots != nil {
1815 for i := 0; i < roots.count; i++ {
1816 pr := roots.roots[i]
1817 addr := uintptr(pr.decl)
1818 if addr <= uintptr(p) && uintptr(p) < addr+pr.size {
1819 n := (*ptrtype)(unsafe.Pointer(t)).elem.size
1820 mask = make([]byte, n/sys.PtrSize)
1821 copy(mask, (*[1 << 29]uint8)(unsafe.Pointer(pr.gcdata))[:pr.ptrdata])
1822 }
1823 return
1824 }
1825 roots = roots.next
1826 }

1828 // heap
1829 var n uintptr
1830 var base uintptr
1831 if mlookup(uintptr(p), &base, &n, nil) != 0 {
1832 mask = make([]byte, n/sys.PtrSize)
1833 for i := uintptr(0); i < n; i += sys.PtrSize {
1834 hbits := heapBitsForAddr(base + i)
1835 if hbits.isPointer() {
1836 mask[i/sys.PtrSize] = 1
1837 }
1838 if i != 1*sys.PtrSize && !hbits.morePointers() {
1839 mask = mask[:i/sys.PtrSize]
1840 break
1841 }
1842 }
1843 return
1844 }

1846 // otherwise, not something the GC knows about.
1847 // possibly read-only data, like malloc(0).
1848 // must not have pointers
1849 // For gccgo, may live on the stack, which is collected conservatively.
1850 return
1851 }