1 // Copyright 2014 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
8 "runtime/internal/atomic"
// add offsets the pointer p by x bytes and returns the result.
// Note: the uintptr arithmetic is kept inside the unsafe.Pointer
// conversion expression, as required by the unsafe.Pointer rules;
// do not split it across statements.
// Should be a built-in for unsafe.Pointer?
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}
19 // getg returns the pointer to the current g.
20 // The compiler rewrites calls to this function into instructions
21 // that fetch the g directly (from TLS or from the dedicated register).
// mcall switches from the g to the g0 stack and invokes fn(g),
// where g is the goroutine that made the call.
// mcall saves g's current PC/SP in g->sched so that it can be restored later.
// It is up to fn to arrange for that later execution, typically by recording
// g in a data structure, causing something to call ready(g) later.
// mcall returns to the original goroutine g later, when g has been rescheduled.
// fn must not return at all; typically it ends by calling schedule, to let the m
// run other goroutines.
//
// mcall can only be called from g stacks (not g0, not gsignal).
//
// This must NOT be go:noescape: if fn is a stack-allocated closure,
// fn puts g on a run queue, and g executes before fn returns, the
// closure will be invalidated while it is still executing.
//
// Declaration only; the implementation lives outside Go.
func mcall(fn func(*g))
// systemstack runs fn on a system stack.
//
// It is common to use a func literal as the argument, in order
// to share inputs and outputs with the code around the call
// to systemstack:
//
//	systemstack(func() {
//		...
//	})
//
// For the gc toolchain this permits running a function that requires
// additional stack space in a context where the stack can not be
// split. For gccgo, however, stack splitting is not managed by the
// Go runtime. In effect, all stacks are system stacks. So this gccgo
// version just runs the function.
func systemstack(fn func()) {
	// The body was lost in this view; the doc comment above pins the
	// gccgo behavior as "just runs the function", restored here.
	fn()
}
// badsystemstack crashes the process; it is the failure path for an
// invalid system-stack switch (see the throw message).
func badsystemstack() {
	throw("systemstack called from unexpected goroutine")
}
// memclr clears n bytes starting at ptr.
// Declaration only; the implementation lives outside Go.
func memclr(ptr unsafe.Pointer, n uintptr)
70 //go:linkname reflect_memclr reflect.memclr
71 func reflect_memclr(ptr unsafe
.Pointer
, n
uintptr) {
// memmove copies n bytes from "from" to "to".
// Declaration only; the implementation lives outside Go.
func memmove(to, from unsafe.Pointer, n uintptr)
79 //go:linkname reflect_memmove reflect.memmove
80 func reflect_memmove(to
, from unsafe
.Pointer
, n
uintptr) {
// memcmp compares size bytes at a and b with C memcmp semantics
// (bound to __builtin_memcmp).
//extern __builtin_memcmp
func memcmp(a, b unsafe.Pointer, size uintptr) int32

// exported value for testing
var hashLoad = loadFactor

// fastrand1 — declaration only; presumably returns a pseudo-random
// uint32 (implementation elsewhere — confirm).
func fastrand1() uint32

// memequal reports whether the size bytes at a and b are equal.
// Declaration only.
func memequal(a, b unsafe.Pointer, size uintptr) bool
98 // noescape hides a pointer from escape analysis. noescape is
99 // the identity function but escape analysis doesn't think the
100 // output depends on the input. noescape is inlined and currently
101 // compiles down to a single xor instruction.
104 func noescape(p unsafe
.Pointer
) unsafe
.Pointer
{
106 return unsafe
.Pointer(x
^ 0)
// mincore — declaration only; presumably wraps the mincore system
// call, writing residency of the n bytes at addr into dst — confirm.
func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32

// jmpdefer — declaration only; presumably jumps to the deferred
// function fv with argument frame pointer argp — confirm.
func jmpdefer(fv *funcval, argp uintptr)

// exit1 terminates the process with the given status code.
// Declaration only.
func exit1(code int32)
// reflectcall calls fn with a copy of the n argument bytes pointed at by arg.
// After fn returns, reflectcall copies n-retoffset result bytes
// back into arg+retoffset before returning. If copying result bytes back,
// the caller should pass the argument frame type as argtype, so that
// call can execute appropriate write barriers during the copy.
// Package reflect passes a frame type. In package runtime, there is only
// one call that copies results back, in cgocallbackg1, and it does NOT pass a
// frame type, meaning there are no write barriers invoked. See that call
// site for justification.
// Declaration only; the implementation lives outside Go.
func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32)

// procyield — declaration only; presumably a CPU spin/backoff hint
// for the given number of cycles — confirm.
func procyield(cycles uint32)
// neverCallThisFunction is an empty marker type; it appears only as
// goexit's parameter type, making goexit impossible to call with a
// real argument.
type neverCallThisFunction struct{}

// goexit is the return stub at the top of every goroutine call stack.
// Each goroutine stack is constructed as if goexit called the
// goroutine's entry point function, so that when the entry point
// function returns, it will return to goexit, which will call goexit1
// to perform the actual exit.
//
// This function must never be called directly. Call goexit1 instead.
// gentraceback assumes that goexit terminates the stack. A direct
// call on the stack will cause gentraceback to stop walking the stack
// prematurely and if there are leftover stack barriers it may panic.
func goexit(neverCallThisFunction)

// publicationBarrier performs a store/store barrier (a "publication"
// or "export" barrier). Some form of synchronization is required
// between initializing an object and making that object accessible to
// another processor. Without synchronization, the initialization
// writes and the "publication" write may be reordered, allowing the
// other processor to follow the pointer and observe an uninitialized
// object. In general, higher-level synchronization should be used,
// such as locking or an atomic pointer write. publicationBarrier is
// for when those aren't an option, such as in the implementation of
// the memory manager.
//
// There's no corresponding barrier for the read side because the read
// side naturally has a data dependency order. All architectures that
// Go supports or seems likely to ever support automatically enforce
// data dependency ordering.
func publicationBarrier()
// setcallerpc — declaration only; presumably overwrites the saved
// return PC of the frame identified by argp — confirm.
func setcallerpc(argp unsafe.Pointer, pc uintptr)

// getcallerpc returns the program counter (PC) of its caller's caller.
// getcallersp returns the stack pointer (SP) of its caller's caller.
// For both, the argp must be a pointer to the caller's first function argument.
// The implementation may or may not use argp, depending on
// the architecture. For example:
//
//	func f(arg1, arg2, arg3 int) {
//		pc := getcallerpc(unsafe.Pointer(&arg1))
//		sp := getcallersp(unsafe.Pointer(&arg1))
//	}
//
// These two lines find the PC and SP immediately following
// the call to f (where f will return).
//
// The call to getcallerpc and getcallersp must be done in the
// frame being asked about. It would not be correct for f to pass &arg1
// to another function g and let g call getcallerpc/getcallersp.
// The call inside g might return information about g's caller or
// information about f's caller or complete garbage.
//
// The result of getcallersp is correct at the time of the return,
// but it may be invalidated by any subsequent call to a function
// that might relocate the stack in order to grow or shrink it.
// A general rule is that the result of getcallersp should be used
// immediately and can only be passed to nosplit functions.
func getcallerpc(argp unsafe.Pointer) uintptr

func getcallersp(argp unsafe.Pointer) uintptr
// _NoArgs is the argp used in Defer structs when there is no argp.
const _NoArgs = ^uintptr(0)

// time_now returns the current wall-clock time as a seconds /
// nanoseconds pair (linked to time.now; declaration only).
//go:linkname time_now time.now
func time_now() (sec int64, nsec int32)
205 // For gccgo, expose this for C callers.
206 //go:linkname unixnanotime runtime.unixnanotime
207 func unixnanotime() int64 {
208 sec
, nsec
:= time_now()
209 return sec
*1e9
+ int64(nsec
)
// round rounds n up to the next multiple of a. a must be a power of 2.
func round(n, a uintptr) uintptr {
	mask := a - 1
	return (n + mask) &^ mask
}
// checkASM returns whether assembly runtime checks have passed.
// NOTE(review): the body was missing from this view; gccgo carries no
// assembly checks, so reporting success unconditionally is the
// implied behavior — confirm upstream.
func checkASM() bool {
	return true
}
222 // For gccgo this is in the C code.
// syscall performs a raw system call identified by trap with up to
// six arguments. For gccgo this can be called directly.
// Declaration only; the implementation lives outside Go.
func syscall(trap uintptr, a1, a2, a3, a4, a5, a6 uintptr) uintptr
229 // throw crashes the program.
230 // For gccgo unless and until we port panic.go.
// newobject allocates a new object.
// For gccgo unless and until we port malloc.go.
// Declaration only.
func newobject(*_type) unsafe.Pointer

// newarray allocates a new array of objects.
// For gccgo unless and until we port malloc.go.
// Declaration only.
func newarray(*_type, int) unsafe.Pointer
241 // funcPC returns the entry PC of the function f.
242 // It assumes that f is a func value. Otherwise the behavior is undefined.
243 // For gccgo here unless and until we port proc.go.
245 func funcPC(f
interface{}) uintptr {
246 return **(**uintptr)(add(unsafe
.Pointer(&f
), sys
.PtrSize
))
// typedmemmove copies a typed value: typ.size bytes from src to dst.
// No write barriers are performed here — this is a plain memmove.
// For gccgo for now.
func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	memmove(dst, src, typ.size)
}
// typedslicecopy copies elements of type typ from the src slice to
// the dst slice, presumably returning the number of elements copied.
//
// NOTE(review): several lines of this function are missing from this
// view — the computation of n (used below but never defined here),
// any length/overlap handling, and the return statement. Only the
// bulk memmove survives; treat this as an incomplete fragment.
// Temporary for gccgo until we port mbarrier.go.
//go:linkname typedslicecopy runtime.typedslicecopy
func typedslicecopy(typ *_type, dst, src slice) int {
	memmove(dst.array, src.array, uintptr(n)*typ.size)
// Here for gccgo until we port malloc.go.
// NOTE(review): the enclosing const ( ... ) markers are not visible in
// this view; these appear to be members of a const block.

// _64bit is 1 on 64-bit systems, 0 on 32-bit systems.
_64bit = 1 << (^uintptr(0) >> 63) / 2

// _MHeapMap_TotalBits selects the heap-map size per the arithmetic
// below: 35 bits on 64-bit Windows, 31 on darwin/arm64, 39 on other
// 64-bit systems, and 32 on 32-bit systems.
_MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*32

// _MaxMem is the largest byte count representable in the heap map.
_MaxMem = uintptr(1<<_MHeapMap_TotalBits - 1)
// c_mallocgc is the C allocator entry point (runtime_mallocgc);
// Go code reaches it through mallocgc below.
// Here for gccgo until we port malloc.go.
//extern runtime_mallocgc
func c_mallocgc(size uintptr, typ uintptr, flag uint32) unsafe.Pointer
280 func mallocgc(size
uintptr, typ
*_type
, needzero
bool) unsafe
.Pointer
{
285 return c_mallocgc(size
, uintptr(unsafe
.Pointer(typ
)), flag
)
// writeBarrier records the current write-barrier configuration.
// Here for gccgo until we port mgc.go.
var writeBarrier struct {
	enabled bool   // compiler emits a check of this before calling write barrier
	needed  bool   // whether we need a write barrier for current GC phase
	cgo     bool   // whether we need a write barrier for a cgo check
	alignme uint64 // guarantee alignment so that compiler can use a 32 or 64-bit load
}
296 // Here for gccgo until we port atomic_pointer.go and mgc.go.
298 func casp(ptr
*unsafe
.Pointer
, old
, new unsafe
.Pointer
) bool {
299 if !atomic
.Casp1((*unsafe
.Pointer
)(noescape(unsafe
.Pointer(ptr
))), noescape(old
), new) {
// unlock releases the mutex l.
// Here for gccgo until we port lock_*.go.
// Declaration only.
func unlock(l *mutex)
309 // Here for gccgo for netpoll and Solaris.
// Temporary for gccgo until we port proc.go.
// Declarations only; the implementations presumably live in the C
// runtime for now — confirm.
func entersyscall(int32)
func entersyscallblock(int32)
func exitsyscall(int32)
func gopark(func(*g, unsafe.Pointer) bool, unsafe.Pointer, string, byte, int)
func goparkunlock(*mutex, string, byte, int)
func goready(*g, int)
// acquireSudog pops a sudog from the current P's cache, refilling the
// cache with one freshly allocated sudog when it is empty.
// Temporary hack for gccgo until we port proc.go.
//
// NOTE(review): several lines are missing from this view — the
// acquisition of the current P (pp is used but never defined here),
// the s.elem check that guards the throw, and the final return.
// Treat this as an incomplete fragment, not the full implementation.
func acquireSudog() *sudog {
	if len(pp.sudogcache) == 0 {
		pp.sudogcache = append(pp.sudogcache, new(sudog))
	}
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	throw("acquireSudog: found s.elem != nil in cache")
// releaseSudog checks that s has been fully disconnected (no elem,
// selectdone, next, prev, waitlink, or channel still attached) and
// returns it to the current P's cache.
// Temporary hack for gccgo until we port proc.go.
//
// NOTE(review): many guard conditions, the definition of pp, the
// final releasem, and closing braces are missing from this view —
// several throw calls appear without their enclosing if statements.
// Treat this as an incomplete fragment.
func releaseSudog(s *sudog) {
	throw("runtime: sudog with non-nil elem")
	if s.selectdone != nil {
		throw("runtime: sudog with non-nil selectdone")
	throw("runtime: sudog with non-nil next")
	throw("runtime: sudog with non-nil prev")
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	throw("runtime: sudog with non-nil c")
	throw("runtime: releaseSudog with non-nil gp.param")
	mp := acquirem() // avoid rescheduling to another P
	pp.sudogcache = append(pp.sudogcache, s)
// typeBitsBulkBarrier is a no-op here.
// Temporary hack for gccgo until we port the garbage collector.
func typeBitsBulkBarrier(typ *_type, p, size uintptr) {}

// roundupsize — declaration only; presumably rounds a requested
// allocation size up to the runtime's size class — confirm.
// Here for gccgo until we port msize.go.
func roundupsize(uintptr) uintptr
376 // Here for gccgo until we port mgc.go.
// Here for gccgo until we port proc.go.
// worldsema is the semaphore guarding stop-the-world sections; it
// starts available (1) and is acquired/released by the helpers below.
var worldsema uint32 = 1

// stopTheWorldWithSema / startTheWorldWithSema do the actual work;
// declarations only here.
func stopTheWorldWithSema()
func startTheWorldWithSema()

// acquireWorldsema acquires worldsema.
// For gccgo to call from C code.
//go:linkname acquireWorldsema runtime.acquireWorldsema
func acquireWorldsema() {
	semacquire(&worldsema, false)
}

// releaseWorldsema releases worldsema.
// For gccgo to call from C code.
//go:linkname releaseWorldsema runtime.releaseWorldsema
func releaseWorldsema() {
	semrelease(&worldsema)
}
// stopTheWorld acquires worldsema, records reason in the current m's
// preemptoff, and stops the world via the system stack.
// Here for gccgo until we port proc.go.
func stopTheWorld(reason string) {
	semacquire(&worldsema, false)
	getg().m.preemptoff = reason
	// NOTE(review): one original line between the assignment above and
	// the call below is not visible in this view — confirm nothing is
	// lost there.
	systemstack(stopTheWorldWithSema)
}
// startTheWorld undoes stopTheWorld: it restarts the world via the
// system stack, releases worldsema, and clears m.preemptoff.
// Here for gccgo until we port proc.go.
func startTheWorld() {
	systemstack(startTheWorldWithSema)
	// worldsema must be held over startTheWorldWithSema to ensure
	// gomaxprocs cannot change while worldsema is held.
	semrelease(&worldsema)
	getg().m.preemptoff = ""
}
417 // For gccgo to call from C code, so that the C code and the Go code
418 // can share the memstats variable for now.
419 //go:linkname getMstats runtime.getMstats
420 func getMstats() *mstats
{
// setcpuprofilerate_m — declaration only; presumably sets the CPU
// profiling rate to hz samples per second — confirm.
// Temporary for gccgo until we port proc.go.
func setcpuprofilerate_m(hz int32)

// sysAlloc — declaration only; presumably obtains n bytes from the
// OS, accounting the allocation in sysStat — confirm.
// Temporary for gccgo until we port mem_GOOS.go.
func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer
// cpuprofAdd records the stack trace stk in the CPU profile.
// Temporary for gccgo until we port proc.go, so that the C signal
// handler can call into cpuprof.
//
// NOTE(review): the function body is not visible in this view; only
// the signature survives here. Incomplete fragment.
//go:linkname cpuprofAdd runtime.cpuprofAdd
func cpuprofAdd(stk []uintptr) {
// For gccgo until we port proc.go.
// Declaration only.
func UnlockOSThread()

// readgstatus atomically loads gp's status word.
func readgstatus(gp *g) uint32 {
	return atomic.Load(&gp.atomicstatus)
}
// persistentalloc — declaration only; presumably allocates size bytes
// with the given alignment from never-freed memory, accounting in
// sysStat — confirm.
// Temporary for gccgo until we port malloc.go
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer

// setprofilebucket — declaration only; presumably associates profile
// bucket b with the allocation at p — confirm.
// Temporary for gccgo until we port mheap.go
func setprofilebucket(p unsafe.Pointer, b *bucket)

// tracebackothers — declaration only.
// Currently in proc.c.
func tracebackothers(*g)
// setgcpercent — declaration only; presumably updates the GC percent
// setting and returns the previous value — confirm.
// Temporary for gccgo until we port mgc.go.
func setgcpercent(int32) int32

// setGCPercent is the runtime/debug entry point; it forwards to
// setgcpercent.
//go:linkname setGCPercent runtime_debug.setGCPercent
func setGCPercent(in int32) (out int32) {
	return setgcpercent(in)
}
// setmaxthreads — declaration only; presumably updates the thread
// limit and returns the previous limit — confirm.
// Temporary for gccgo until we port proc.go.
func setmaxthreads(int) int

// setMaxThreads is the runtime/debug entry point; it forwards to
// setmaxthreads.
//go:linkname setMaxThreads runtime_debug.setMaxThreads
func setMaxThreads(in int) (out int) {
	return setmaxthreads(in)
}
// atomicstorep atomically stores new into *ptr without a write
// barrier (StorepNoWB); noescape keeps ptr out of escape analysis.
// Temporary for gccgo until we port atomic_pointer.go.
func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
	atomic.StorepNoWB(noescape(ptr), new)
}
// writebarrierptr stores src into *dst.
// Temporary for gccgo until we port mbarrier.go
func writebarrierptr(dst *uintptr, src uintptr) {
	// NOTE(review): the body was missing from this view; for gccgo a
	// plain store is the implied behavior (no barrier work is
	// performed elsewhere in this file) — confirm upstream.
	*dst = src
}
485 // Temporary for gccgo until we port malloc.go
488 //go:linkname getZerobase runtime.getZerobase
489 func getZerobase() *uintptr {