// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

8 "runtime/internal/atomic"
// Should be a built-in for unsafe.Pointer?
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}

// getg returns the pointer to the current g.
// The compiler rewrites calls to this function into instructions
// that fetch the g directly (from TLS or from the dedicated register).
func getg() *g

// mcall switches from the g to the g0 stack and invokes fn(g),
// where g is the goroutine that made the call.
// mcall saves g's current PC/SP in g->sched so that it can be restored later.
// It is up to fn to arrange for that later execution, typically by recording
// g in a data structure, causing something to call ready(g) later.
// mcall returns to the original goroutine g later, when g has been rescheduled.
// fn must not return at all; typically it ends by calling schedule, to let the m
// run other goroutines.
//
// mcall can only be called from g stacks (not g0, not gsignal).
//
// This must NOT be go:noescape: if fn is a stack-allocated closure,
// fn puts g on a run queue, and g executes before fn returns, the
// closure will be invalidated while it is still executing.
func mcall(fn func(*g))

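// An illustrative sketch (hypothetical names) of the pattern described
// above: fn parks g somewhere ready(g) can later find it, then hands
// the m to the scheduler and never returns.
//
//	mcall(func(gp *g) {
//		enqueue(waitq, gp) // hypothetical wait queue
//		schedule()         // run other goroutines; does not return
//	})
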
// systemstack runs fn on a system stack.
//
// It is common to use a func literal as the argument, in order
// to share inputs and outputs with the code around the call
// to systemstack:
//
//	... set up y ...
//	systemstack(func() {
//		x = bigcall(y)
//	})
//	... use x ...
//
// For the gc toolchain this permits running a function that requires
// additional stack space in a context where the stack can not be
// split. We don't really need additional stack space in gccgo, since
// stack splitting is handled separately. But to keep things looking
// the same, we do switch to the g0 stack here if necessary.
func systemstack(fn func()) {
	gp := getg()
	mp := gp.m
	if gp == mp.g0 || gp == mp.gsignal {
		// Already on a system stack; just run fn.
		fn()
	} else if gp == mp.curg {
		// Switch to the g0 stack, run fn, then switch back.
		fn1 := func(origg *g) {
			fn()
			gogo(origg)
		}
		mcall(*(*func(*g))(noescape(unsafe.Pointer(&fn1))))
	} else {
		badsystemstack()
	}
}

func badsystemstack() {
	throw("systemstack called from unexpected goroutine")
}

// memclrNoHeapPointers clears n bytes starting at ptr.
//
// Usually you should use typedmemclr. memclrNoHeapPointers should be
// used only when the caller knows that *ptr contains no heap pointers
// because either:
//
// 1. *ptr is initialized memory and its type is pointer-free.
//
// 2. *ptr is uninitialized memory (e.g., memory that's being reused
//    for a new allocation) and hence contains only "junk".
func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)

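// For example (illustrative only), zeroing a buffer known to hold no
// pointers, as in case 1 above:
//
//	var buf [64]byte // pointer-free type
//	memclrNoHeapPointers(unsafe.Pointer(&buf), unsafe.Sizeof(buf))
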
//go:linkname reflect_memclrNoHeapPointers reflect.memclrNoHeapPointers
func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
	memclrNoHeapPointers(ptr, n)
}

// memmove copies n bytes from "from" to "to".
func memmove(to, from unsafe.Pointer, n uintptr)

//go:linkname reflect_memmove reflect.memmove
func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
	memmove(to, from, n)
}

//extern __builtin_memcmp
func memcmp(a, b unsafe.Pointer, size uintptr) int32

// exported value for testing
var hashLoad = float32(loadFactorNum) / float32(loadFactorDen)

func fastrand() uint32 {
	mp := getg().m
	// Implement xorshift64+: 2 32-bit xorshift sequences added together.
	// Shift triplet [17,7,16] was calculated as indicated in Marsaglia's
	// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
	// This generator passes the SmallCrush suite, part of TestU01 framework:
	// http://simul.iro.umontreal.ca/testu01/tu01.html
	s1, s0 := mp.fastrand[0], mp.fastrand[1]
	s1 ^= s1 << 17
	s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
	mp.fastrand[0], mp.fastrand[1] = s0, s1
	return s0 + s1
}

func fastrandn(n uint32) uint32 {
	// This is similar to fastrand() % n, but faster.
	// See http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
	return uint32(uint64(fastrand()) * uint64(n) >> 32)
}

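// Why the multiply-shift works: fastrand() is uniform in [0, 2^32), so
// the 64-bit product fastrand()*n lies in [0, n*2^32), and shifting
// right by 32 maps it into [0, n). Illustrative values: with n = 6 and
// fastrand() = 0x80000000, the result is (0x80000000*6)>>32 = 3.
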
//go:linkname sync_fastrand sync.fastrand
func sync_fastrand() uint32 { return fastrand() }

func memequal(a, b unsafe.Pointer, size uintptr) bool

// noescape hides a pointer from escape analysis. noescape is
// the identity function but escape analysis doesn't think the
// output depends on the input. noescape is inlined and currently
// compiles down to zero instructions.
func noescape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}

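// For example, systemstack above keeps its closure on the stack by
// laundering the pointer through noescape:
//
//	fn1 := func(origg *g) { ... }
//	mcall(*(*func(*g))(noescape(unsafe.Pointer(&fn1))))
//
// This is safe only because mcall never lets fn1 outlive the call.
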
func jmpdefer(fv *funcval, argp uintptr)
func exit1(code int32)

//extern __builtin_trap
func breakpoint()

//go:linkname reflectcall reflect.call
func reflectcall(fntype *functype, fn *funcval, isInterface, isMethod bool, params, results *unsafe.Pointer)

func procyield(cycles uint32)

type neverCallThisFunction struct{}

// goexit is the return stub at the top of every goroutine call stack.
// Each goroutine stack is constructed as if goexit called the
// goroutine's entry point function, so that when the entry point
// function returns, it will return to goexit, which will call goexit1
// to perform the actual exit.
//
// This function must never be called directly. Call goexit1 instead.
// gentraceback assumes that goexit terminates the stack. A direct
// call on the stack will cause gentraceback to stop walking the stack
// prematurely and if there is leftover state it may panic.
func goexit(neverCallThisFunction)

// publicationBarrier performs a store/store barrier (a "publication"
// or "export" barrier). Some form of synchronization is required
// between initializing an object and making that object accessible to
// another processor. Without synchronization, the initialization
// writes and the "publication" write may be reordered, allowing the
// other processor to follow the pointer and observe an uninitialized
// object. In general, higher-level synchronization should be used,
// such as locking or an atomic pointer write. publicationBarrier is
// for when those aren't an option, such as in the implementation of
// the memory manager.
//
// There's no corresponding barrier for the read side because the read
// side naturally has a data dependency order. All architectures that
// Go supports or seems likely to ever support automatically enforce
// data dependency ordering.
func publicationBarrier()

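// An illustrative sketch (hypothetical names) of the pattern the
// barrier guards: finish all initialization writes before the single
// write that publishes the object.
//
//	obj := (*T)(rawalloc(size)) // hypothetical allocator
//	obj.field = 42              // initialization writes...
//	publicationBarrier()        // ...must not be reordered past...
//	sharedPtr = obj             // ...the publication write.
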
// getcallerpc returns the program counter (PC) of its caller's caller.
// getcallersp returns the stack pointer (SP) of its caller's caller.
// argp must be a pointer to the caller's first function argument.
// The implementation may or may not use argp, depending on
// the architecture. The implementation may be a compiler
// intrinsic; there is not necessarily code implementing this
// on every platform.
//
// For example:
//
//	func f(arg1, arg2, arg3 int) {
//		pc := getcallerpc()
//		sp := getcallersp(unsafe.Pointer(&arg1))
//	}
//
// These two lines find the PC and SP immediately following
// the call to f (where f will return).
//
// The call to getcallerpc and getcallersp must be done in the
// frame being asked about. It would not be correct for f to pass &arg1
// to another function g and let g call getcallerpc/getcallersp.
// The call inside g might return information about g's caller or
// information about f's caller or complete garbage.
//
// The result of getcallersp is correct at the time of the return,
// but it may be invalidated by any subsequent call to a function
// that might relocate the stack in order to grow or shrink it.
// A general rule is that the result of getcallersp should be used
// immediately and can only be passed to nosplit functions.
func getcallerpc() uintptr

func getcallersp(argp unsafe.Pointer) uintptr

func asmcgocall(fn, arg unsafe.Pointer) int32 {
	throw("asmcgocall")
	return 0
}

// argp used in Defer structs when there is no argp.
const _NoArgs = ^uintptr(0)

//extern __builtin_prefetch
func prefetch(addr unsafe.Pointer, rw int32, locality int32)

func prefetcht0(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 3)
}

func prefetcht1(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 2)
}

func prefetcht2(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 1)
}

func prefetchnta(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 0)
}

// round n up to a multiple of a. a must be a power of 2.
func round(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

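// For example, round(13, 8) == (13+7)&^7 == 20&^7 == 16, while an
// already-aligned value is unchanged: round(16, 8) == 23&^7 == 16.
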
// checkASM returns whether assembly runtime checks have passed.
func checkASM() bool {
	// No assembly checks to run here.
	return true
}

func eqstring(x, y string) bool {
	a := stringStructOf(&x)
	b := stringStructOf(&y)
	if a.len != b.len {
		return false
	}
	if a.str == b.str {
		return true
	}
	return memequal(a.str, b.str, uintptr(a.len))
}

// For gccgo this is in the C code.
func osyield()

// For gccgo this can be called directly.
//extern syscall
func syscall(trap uintptr, a1, a2, a3, a4, a5, a6 uintptr) uintptr

// For gccgo, to communicate from the C code to the Go code.
//go:linkname setIsCgo runtime.setIsCgo
func setIsCgo() {
	iscgo = true
}

// For gccgo, to communicate from the C code to the Go code.
//go:linkname setCpuidECX runtime.setCpuidECX
func setCpuidECX(v uint32) {
	cpuid_ecx = v
}

// For gccgo, to communicate from the C code to the Go code.
//go:linkname setSupportAES runtime.setSupportAES
func setSupportAES(v bool) {
	support_aes = v
}

// Here for gccgo until we port atomic_pointer.go and mgc.go.
func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
	if !atomic.Casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new) {
		return false
	}
	return true
}

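// A minimal sketch (hypothetical names) of the retry loop casp is
// typically used in: install a new value only if the slot still holds
// the value last observed.
//
//	for {
//		old := atomic.Loadp(unsafe.Pointer(&slot)) // hypothetical slot
//		if casp(&slot, old, newval) {
//			break
//		}
//	}
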
// Here for gccgo until we port lock_*.go.
func lock(l *mutex)
func unlock(l *mutex)

// Temporary for gccgo until we port proc.go.
func entersyscall(int32)
func entersyscallblock(int32)

// Here for gccgo until we port mgc.go.
func GC()

// For gccgo to call from C code, so that the C code and the Go code
// can share the memstats variable for now.
//go:linkname getMstats runtime.getMstats
func getMstats() *mstats {
	return &memstats
}

// Temporary for gccgo until we port mem_GOOS.go.
func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer
func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64)

// Temporary for gccgo until we port malloc.go
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer

// Temporary for gccgo until we port mheap.go
func setprofilebucket(p unsafe.Pointer, b *bucket)

// Temporary for gccgo until we port atomic_pointer.go.
func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
	atomic.StorepNoWB(noescape(ptr), new)
}

// Get the signal trampoline, written in C.
func getSigtramp() uintptr

// The sa_handler field is generally hidden in a union, so use C accessors.
func getSigactionHandler(*_sigaction) uintptr

func setSigactionHandler(*_sigaction, uintptr)

// Retrieve fields from the siginfo_t and ucontext_t pointers passed
// to a signal handler using C, as they are often hidden in a union.
// Returns the fault address and, if available, the PC where the
// signal occurred.
func getSiginfo(*_siginfo_t, unsafe.Pointer) (sigaddr uintptr, sigpc uintptr)

// Implemented in C for gccgo.
func dumpregs(*_siginfo_t, unsafe.Pointer)

// Temporary for gccgo until we port proc.go.
//go:linkname getsched runtime.getsched
func getsched() *schedt {
	return &sched
}

// Temporary for gccgo until we port proc.go.
//go:linkname getCgoHasExtraM runtime.getCgoHasExtraM
func getCgoHasExtraM() *bool {
	return &cgoHasExtraM
}

// Temporary for gccgo until we port proc.go.
//go:linkname getAllP runtime.getAllP
func getAllP() **p {
	return &allp[0]
}

// Temporary for gccgo until we port proc.go.
//go:linkname allocg runtime.allocg
func allocg() *g {
	return new(g)
}

// Temporary for gccgo until we port the garbage collector.
//go:linkname getallglen runtime.getallglen
func getallglen() uintptr {
	return allglen
}

// Temporary for gccgo until we port the garbage collector.
//go:linkname getallg runtime.getallg
func getallg(i int) *g {
	return allgs[i]
}

// Temporary for gccgo until we port the garbage collector.
//go:linkname getallm runtime.getallm
func getallm() *m {
	return allm
}

// Throw and rethrow an exception.
func throwException()
func rethrowException()

// Fetch the size and required alignment of the _Unwind_Exception type
// used by the stack unwinder.
func unwindExceptionSize() uintptr

// Temporary for gccgo until C code no longer needs it.
//go:linkname getPanicking runtime.getPanicking
func getPanicking() uint32 {
	return panicking
}

// Called by C code to set the number of CPUs.
//go:linkname setncpu runtime.setncpu
func setncpu(n int32) {
	ncpu = n
}

// Called by C code to set the page size.
//go:linkname setpagesize runtime.setpagesize
func setpagesize(s uintptr) {
	if physPageSize == 0 {
		physPageSize = s
	}
}

// Called by C code during library initialization.
//go:linkname runtime_m0 runtime.runtime_m0
func runtime_m0() *m {
	return &m0
}

// Temporary for gccgo until we port mgc.go.
//go:linkname runtime_g0 runtime.runtime_g0
func runtime_g0() *g {
	return &g0
}

const uintptrMask = 1<<(8*sys.PtrSize) - 1

type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// bool2int returns 0 if x is false or 1 if x is true.
func bool2int(x bool) int {
	if x {
		return 1
	}
	return 0
}