libgo/go/runtime/stubs.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// Should be a built-in for unsafe.Pointer?
//go:nosplit
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}

// getg returns the pointer to the current g.
// The compiler rewrites calls to this function into instructions
// that fetch the g directly (from TLS or from the dedicated register).
func getg() *g

// mcall switches from the g to the g0 stack and invokes fn(g),
// where g is the goroutine that made the call.
// mcall saves g's current PC/SP in g->sched so that it can be restored later.
// It is up to fn to arrange for that later execution, typically by recording
// g in a data structure, causing something to call ready(g) later.
// mcall returns to the original goroutine g later, when g has been rescheduled.
// fn must not return at all; typically it ends by calling schedule, to let the m
// run other goroutines.
//
// mcall can only be called from g stacks (not g0, not gsignal).
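//
// A typical call site looks like this hedged sketch (the body is
// illustrative; only the overall shape is meant):
//
//	mcall(func(gp *g) {
//		// Now running on the g0 stack; gp is the goroutine that
//		// called mcall. Record gp somewhere so it can be made
//		// runnable again later, then pick another goroutine.
//		schedule() // never returns
//	})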
//
// This must NOT be go:noescape: if fn is a stack-allocated closure,
// fn puts g on a run queue, and g executes before fn returns, the
// closure will be invalidated while it is still executing.
func mcall(fn func(*g))

// systemstack runs fn on a system stack.
//
// It is common to use a func literal as the argument, in order
// to share inputs and outputs with the code around the call
// to systemstack:
//
//	... set up y ...
//	systemstack(func() {
//		x = bigcall(y)
//	})
//	... use x ...
//
// For the gc toolchain this permits running a function that requires
// additional stack space in a context where the stack cannot be
// split. We don't really need additional stack space in gccgo, since
// stack splitting is handled separately. But to keep things looking
// the same, we do switch to the g0 stack here if necessary.
func systemstack(fn func()) {
	gp := getg()
	mp := gp.m
	if gp == mp.g0 || gp == mp.gsignal {
		fn()
	} else if gp == mp.curg {
		fn1 := func(origg *g) {
			fn()
			gogo(origg)
		}
		mcall(*(*func(*g))(noescape(unsafe.Pointer(&fn1))))
	} else {
		badsystemstack()
	}
}

func badsystemstack() {
	throw("systemstack called from unexpected goroutine")
}

// memclrNoHeapPointers clears n bytes starting at ptr.
//
// Usually you should use typedmemclr. memclrNoHeapPointers should be
// used only when the caller knows that *ptr contains no heap pointers
// because either:
//
// 1. *ptr is initialized memory and its type is pointer-free.
//
// 2. *ptr is uninitialized memory (e.g., memory that's being reused
//    for a new allocation) and hence contains only "junk".
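//
// For example, a hedged sketch of case 2 (buf is an illustrative
// pointer-free byte slice whose backing store is being reused):
//
//	memclrNoHeapPointers(unsafe.Pointer(&buf[0]), uintptr(len(buf)))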
//
// in memclr_*.s
//go:noescape
func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)

//go:linkname reflect_memclrNoHeapPointers reflect.memclrNoHeapPointers
func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
	memclrNoHeapPointers(ptr, n)
}

// memmove copies n bytes from "from" to "to".
//go:noescape
func memmove(to, from unsafe.Pointer, n uintptr)

//go:linkname reflect_memmove reflect.memmove
func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
	memmove(to, from, n)
}

//go:noescape
//extern __builtin_memcmp
func memcmp(a, b unsafe.Pointer, size uintptr) int32

// exported value for testing
var hashLoad = float32(loadFactorNum) / float32(loadFactorDen)

//go:nosplit
func fastrand() uint32 {
	mp := getg().m
	// Implement xorshift64+: 2 32-bit xorshift sequences added together.
	// Shift triplet [17,7,16] was calculated as indicated in Marsaglia's
	// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
	// This generator passes the SmallCrush suite, part of TestU01 framework:
	// http://simul.iro.umontreal.ca/testu01/tu01.html
	s1, s0 := mp.fastrand[0], mp.fastrand[1]
	s1 ^= s1 << 17
	s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16
	mp.fastrand[0], mp.fastrand[1] = s0, s1
	return s0 + s1
}

//go:nosplit
func fastrandn(n uint32) uint32 {
	// This is similar to fastrand() % n, but faster.
	// See http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
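	// For example, with n == 6 the 64-bit product fastrand()*6 lies in
	// [0, 6<<32), so its high 32 bits (the >>32 below) fall in [0, 6),
	// giving a nearly uniform reduction with no division or modulo.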
	return uint32(uint64(fastrand()) * uint64(n) >> 32)
}

//go:linkname sync_fastrand sync.fastrand
func sync_fastrand() uint32 { return fastrand() }

// in asm_*.s
//go:noescape
func memequal(a, b unsafe.Pointer, size uintptr) bool

// noescape hides a pointer from escape analysis. noescape is
// the identity function but escape analysis doesn't think the
// output depends on the input. noescape is inlined and currently
// compiles down to zero instructions.
// USE CAREFULLY!
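//
// For example, in this hedged sketch f is a hypothetical function whose
// pointer parameter would otherwise be reported as escaping:
//
//	var buf [64]byte
//	f(noescape(unsafe.Pointer(&buf[0])))
//
// buf can then stay on the stack, but the caller must guarantee that f
// does not retain the pointer beyond the call.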
//go:nosplit
func noescape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}

//go:noescape
func jmpdefer(fv *funcval, argp uintptr)
func exit1(code int32)
func setg(gg *g)

//extern __builtin_trap
func breakpoint()

func asminit() {}

//go:linkname reflectcall reflect.call
//go:noescape
func reflectcall(fntype *functype, fn *funcval, isInterface, isMethod bool, params, results *unsafe.Pointer)

func procyield(cycles uint32)

type neverCallThisFunction struct{}

// goexit is the return stub at the top of every goroutine call stack.
// Each goroutine stack is constructed as if goexit called the
// goroutine's entry point function, so that when the entry point
// function returns, it will return to goexit, which will call goexit1
// to perform the actual exit.
//
// This function must never be called directly. Call goexit1 instead.
// gentraceback assumes that goexit terminates the stack. A direct
// call on the stack will cause gentraceback to stop walking the stack
// prematurely and if there is leftover state it may panic.
func goexit(neverCallThisFunction)

// publicationBarrier performs a store/store barrier (a "publication"
// or "export" barrier). Some form of synchronization is required
// between initializing an object and making that object accessible to
// another processor. Without synchronization, the initialization
// writes and the "publication" write may be reordered, allowing the
// other processor to follow the pointer and observe an uninitialized
// object. In general, higher-level synchronization should be used,
// such as locking or an atomic pointer write. publicationBarrier is
// for when those aren't an option, such as in the implementation of
// the memory manager.
//
// There's no corresponding barrier for the read side because the read
// side naturally has a data dependency order. All architectures that
// Go supports or seems likely to ever support automatically enforce
// data dependency ordering.
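//
// For example, this hedged sketch (obj, sharedPtr, and the field values are
// illustrative only) shows the publish pattern the barrier is meant for:
//
//	obj.state = initialized // initialization writes
//	publicationBarrier()    // order the writes above before the publish below
//	sharedPtr = obj         // publication write; another processor loading
//	                        // sharedPtr now observes an initialized obj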
func publicationBarrier()

// getcallerpc returns the program counter (PC) of its caller's caller.
// getcallersp returns the stack pointer (SP) of its caller's caller.
// argp must be a pointer to the caller's first function argument.
// The implementation may or may not use argp, depending on
// the architecture. The implementation may be a compiler
// intrinsic; there is not necessarily code implementing this
// on every platform.
//
// For example:
//
//	func f(arg1, arg2, arg3 int) {
//		pc := getcallerpc()
//		sp := getcallersp(unsafe.Pointer(&arg1))
//	}
//
// These two lines find the PC and SP immediately following
// the call to f (where f will return).
//
// The call to getcallerpc and getcallersp must be done in the
// frame being asked about. It would not be correct for f to pass &arg1
// to another function g and let g call getcallerpc/getcallersp.
// The call inside g might return information about g's caller or
// information about f's caller or complete garbage.
//
// The result of getcallersp is correct at the time of the return,
// but it may be invalidated by any subsequent call to a function
// that might relocate the stack in order to grow or shrink it.
// A general rule is that the result of getcallersp should be used
// immediately and can only be passed to nosplit functions.

//go:noescape
func getcallerpc() uintptr

//go:noescape
func getcallersp(argp unsafe.Pointer) uintptr

func asmcgocall(fn, arg unsafe.Pointer) int32 {
	throw("asmcgocall")
	return 0
}

// argp used in Defer structs when there is no argp.
const _NoArgs = ^uintptr(0)

//extern __builtin_prefetch
func prefetch(addr unsafe.Pointer, rw int32, locality int32)
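
// The rw argument selects a read (0) or write (1) prefetch, and locality is
// GCC's temporal-locality hint: 3 keeps the data in all cache levels, 2 and 1
// request progressively less, and 0 means no temporal locality. The wrappers
// below issue read prefetches with decreasing locality, mirroring the x86
// prefetcht0/t1/t2/nta instructions they are named after.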

func prefetcht0(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 3)
}

func prefetcht1(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 2)
}

func prefetcht2(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 1)
}

func prefetchnta(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 0)
}

// round n up to a multiple of a. a must be a power of 2.
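// For example, round(13, 8) computes (13+7) &^ 7 = 20 &^ 7 = 16: adding a-1
// moves n up to or past the next multiple, and &^ (a-1) clears the low bits
// to land exactly on it.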
func round(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

// checkASM returns whether assembly runtime checks have passed.
func checkASM() bool {
	return true
}

func eqstring(x, y string) bool {
	a := stringStructOf(&x)
	b := stringStructOf(&y)
	if a.len != b.len {
		return false
	}
	if a.str == b.str {
		return true
	}
	return memequal(a.str, b.str, uintptr(a.len))
}

// For gccgo this is in the C code.
func osyield()

// For gccgo this can be called directly.
//extern syscall
func syscall(trap uintptr, a1, a2, a3, a4, a5, a6 uintptr) uintptr

// For gccgo, to communicate from the C code to the Go code.
//go:linkname setIsCgo runtime.setIsCgo
func setIsCgo() {
	iscgo = true
}

// For gccgo, to communicate from the C code to the Go code.
//go:linkname setCpuidECX runtime.setCpuidECX
func setCpuidECX(v uint32) {
	cpuid_ecx = v
}

// For gccgo, to communicate from the C code to the Go code.
//go:linkname setSupportAES runtime.setSupportAES
func setSupportAES(v bool) {
	support_aes = v
}

// Here for gccgo.
func errno() int

// Temporary for gccgo until we port proc.go.
func entersyscall(int32)
func entersyscallblock(int32)

// For gccgo to call from C code, so that the C code and the Go code
// can share the memstats variable for now.
//go:linkname getMstats runtime.getMstats
func getMstats() *mstats {
	return &memstats
}

// Get signal trampoline, written in C.
func getSigtramp() uintptr

// The sa_handler field is generally hidden in a union, so use C accessors.
//go:noescape
func getSigactionHandler(*_sigaction) uintptr

//go:noescape
func setSigactionHandler(*_sigaction, uintptr)

// Retrieve fields from the siginfo_t and ucontext_t pointers passed
// to a signal handler using C, as they are often hidden in a union.
// Returns the signal address and, if available, the PC where the
// signal occurred.
func getSiginfo(*_siginfo_t, unsafe.Pointer) (sigaddr uintptr, sigpc uintptr)

// Implemented in C for gccgo.
func dumpregs(*_siginfo_t, unsafe.Pointer)

// Temporary for gccgo until we port proc.go.
//go:linkname getsched runtime.getsched
func getsched() *schedt {
	return &sched
}

// Temporary for gccgo until we port proc.go.
//go:linkname getCgoHasExtraM runtime.getCgoHasExtraM
func getCgoHasExtraM() *bool {
	return &cgoHasExtraM
}

// Temporary for gccgo until we port proc.go.
//go:linkname getAllP runtime.getAllP
func getAllP() **p {
	return &allp[0]
}

// Temporary for gccgo until we port proc.go.
//go:linkname allocg runtime.allocg
func allocg() *g {
	return new(g)
}

// Temporary for gccgo until we port the garbage collector.
//go:linkname getallglen runtime.getallglen
func getallglen() uintptr {
	return allglen
}

// Temporary for gccgo until we port the garbage collector.
//go:linkname getallg runtime.getallg
func getallg(i int) *g {
	return allgs[i]
}

// Temporary for gccgo until we port the garbage collector.
//go:linkname getallm runtime.getallm
func getallm() *m {
	return allm
}

// Throw and rethrow an exception.
func throwException()
func rethrowException()

// Fetch the size and required alignment of the _Unwind_Exception type
// used by the stack unwinder.
func unwindExceptionSize() uintptr

// Temporary for gccgo until C code no longer needs it.
//go:nosplit
//go:linkname getPanicking runtime.getPanicking
func getPanicking() uint32 {
	return panicking
}

// Called by C code to set the number of CPUs.
//go:linkname setncpu runtime.setncpu
func setncpu(n int32) {
	ncpu = n
}

// Called by C code to set the page size.
//go:linkname setpagesize runtime.setpagesize
func setpagesize(s uintptr) {
	if physPageSize == 0 {
		physPageSize = s
	}
}

// Called by C code during library initialization.
//go:linkname runtime_m0 runtime.runtime_m0
func runtime_m0() *m {
	return &m0
}

// Temporary for gccgo until we port mgc.go.
//go:linkname runtime_g0 runtime.runtime_g0
func runtime_g0() *g {
	return &g0
}

const uintptrMask = 1<<(8*sys.PtrSize) - 1

type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}

// bool2int returns 0 if x is false or 1 if x is true.
func bool2int(x bool) int {
	if x {
		return 1
	}
	return 0
}