libgo/go/runtime/stubs.go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Should be a built-in for unsafe.Pointer?
//go:nosplit
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}
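
// Illustrative sketch, not part of the original file: add is the runtime's
// manual pointer arithmetic, e.g. for stepping through raw memory whose
// element size is known. elemAt is a hypothetical helper added here only
// to show the pattern.
func elemAt(base unsafe.Pointer, i, elemSize uintptr) unsafe.Pointer {
	return add(base, i*elemSize)
}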

// getg returns the pointer to the current g.
// The compiler rewrites calls to this function into instructions
// that fetch the g directly (from TLS or from the dedicated register).
func getg() *g

// mcall switches from the g to the g0 stack and invokes fn(g),
// where g is the goroutine that made the call.
// mcall saves g's current PC/SP in g->sched so that it can be restored later.
// It is up to fn to arrange for that later execution, typically by recording
// g in a data structure, causing something to call ready(g) later.
// mcall returns to the original goroutine g later, when g has been rescheduled.
// fn must not return at all; typically it ends by calling schedule, to let the m
// run other goroutines.
//
// mcall can only be called from g stacks (not g0, not gsignal).
//
// This must NOT be go:noescape: if fn is a stack-allocated closure,
// fn puts g on a run queue, and g executes before fn returns, the
// closure will be invalidated while it is still executing.
func mcall(fn func(*g))

// systemstack runs fn on a system stack.
//
// It is common to use a func literal as the argument, in order
// to share inputs and outputs with the code around the call
// to systemstack:
//
//	... set up y ...
//	systemstack(func() {
//		x = bigcall(y)
//	})
//	... use x ...
//
// For the gc toolchain this permits running a function that requires
// additional stack space in a context where the stack can not be
// split. We don't really need additional stack space in gccgo, since
// stack splitting is handled separately. But to keep things looking
// the same, we do switch to the g0 stack here if necessary.
func systemstack(fn func()) {
	gp := getg()
	mp := gp.m
	if gp == mp.g0 || gp == mp.gsignal {
		fn()
	} else if gp == mp.curg {
		mcall(func(origg *g) {
			fn()
			gogo(origg)
		})
	} else {
		badsystemstack()
	}
}

func badsystemstack() {
	throw("systemstack called from unexpected goroutine")
}

// memclrNoHeapPointers clears n bytes starting at ptr.
//
// Usually you should use typedmemclr. memclrNoHeapPointers should be
// used only when the caller knows that *ptr contains no heap pointers
// because either:
//
// 1. *ptr is initialized memory and its type is pointer-free.
//
// 2. *ptr is uninitialized memory (e.g., memory that's being reused
//    for a new allocation) and hence contains only "junk".
//
// in memclr_*.s
//go:noescape
func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
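
// Illustrative sketch, not part of the original file: a byte buffer is a
// typical pointer-free target for memclrNoHeapPointers. clearBytes is a
// hypothetical helper added here only to show the pattern.
func clearBytes(b []byte) {
	if len(b) > 0 {
		memclrNoHeapPointers(unsafe.Pointer(&b[0]), uintptr(len(b)))
	}
}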

//go:linkname reflect_memclrNoHeapPointers reflect.memclrNoHeapPointers
func reflect_memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) {
	memclrNoHeapPointers(ptr, n)
}

// memmove copies n bytes from "from" to "to".
//go:noescape
func memmove(to, from unsafe.Pointer, n uintptr)

//go:linkname reflect_memmove reflect.memmove
func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
	memmove(to, from, n)
}

//go:noescape
//extern __builtin_memcmp
func memcmp(a, b unsafe.Pointer, size uintptr) int32

// exported value for testing
var hashLoad = loadFactor

// in asm_*.s
func fastrand() uint32

//go:linkname sync_fastrand sync.fastrand
func sync_fastrand() uint32 { return fastrand() }

// in asm_*.s
//go:noescape
func memequal(a, b unsafe.Pointer, size uintptr) bool

// noescape hides a pointer from escape analysis. noescape is
// the identity function but escape analysis doesn't think the
// output depends on the input. noescape is inlined and currently
// compiles down to zero instructions.
// USE CAREFULLY!
//go:nosplit
func noescape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}
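
// Illustrative sketch, not part of the original file: noescape is typically
// used to keep a short-lived buffer on the stack when its address is passed
// to a routine whose parameter would otherwise be treated as escaping.
// fillBuf and the fill callback are hypothetical, added only for illustration.
func fillBuf(fill func(unsafe.Pointer, uintptr)) [16]byte {
	var buf [16]byte
	// Hide &buf from escape analysis so buf can stay on the stack.
	fill(noescape(unsafe.Pointer(&buf[0])), uintptr(len(buf)))
	return buf
}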

//go:noescape
func jmpdefer(fv *funcval, argp uintptr)
func exit1(code int32)
func setg(gg *g)

//extern __builtin_trap
func breakpoint()

func asminit() {}

//go:linkname reflectcall reflect.call
func reflectcall(fntype *functype, fn *funcval, isInterface, isMethod bool, params, results *unsafe.Pointer)

func procyield(cycles uint32)

type neverCallThisFunction struct{}

// goexit is the return stub at the top of every goroutine call stack.
// Each goroutine stack is constructed as if goexit called the
// goroutine's entry point function, so that when the entry point
// function returns, it will return to goexit, which will call goexit1
// to perform the actual exit.
//
// This function must never be called directly. Call goexit1 instead.
// gentraceback assumes that goexit terminates the stack. A direct
// call on the stack will cause gentraceback to stop walking the stack
// prematurely and if there are leftover stack barriers it may panic.
func goexit(neverCallThisFunction)

// publicationBarrier performs a store/store barrier (a "publication"
// or "export" barrier). Some form of synchronization is required
// between initializing an object and making that object accessible to
// another processor. Without synchronization, the initialization
// writes and the "publication" write may be reordered, allowing the
// other processor to follow the pointer and observe an uninitialized
// object. In general, higher-level synchronization should be used,
// such as locking or an atomic pointer write. publicationBarrier is
// for when those aren't an option, such as in the implementation of
// the memory manager.
//
// There's no corresponding barrier for the read side because the read
// side naturally has a data dependency order. All architectures that
// Go supports or seems likely to ever support automatically enforce
// data dependency ordering.
func publicationBarrier()
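
// Illustrative sketch, not part of the original file: the publication
// pattern described above. The pubNode type and the published variable are
// hypothetical, added only for illustration.
type pubNode struct {
	val int
}

var published *pubNode

// publishNode initializes a node and only then makes it visible to other
// processors; the barrier keeps the two stores ordered.
func publishNode(v int) {
	n := &pubNode{val: v} // initialization write
	publicationBarrier()  // store/store barrier
	published = n         // publication write; readers rely on data dependency ordering
}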

//go:noescape
func setcallerpc(argp unsafe.Pointer, pc uintptr)

// getcallerpc returns the program counter (PC) of its caller's caller.
// getcallersp returns the stack pointer (SP) of its caller's caller.
// For both, the argp must be a pointer to the caller's first function argument.
// The implementation may or may not use argp, depending on
// the architecture.
//
// For example:
//
//	func f(arg1, arg2, arg3 int) {
//		pc := getcallerpc(unsafe.Pointer(&arg1))
//		sp := getcallersp(unsafe.Pointer(&arg1))
//	}
//
// These two lines find the PC and SP immediately following
// the call to f (where f will return).
//
// The call to getcallerpc and getcallersp must be done in the
// frame being asked about. It would not be correct for f to pass &arg1
// to another function g and let g call getcallerpc/getcallersp.
// The call inside g might return information about g's caller or
// information about f's caller or complete garbage.
//
// The result of getcallersp is correct at the time of the return,
// but it may be invalidated by any subsequent call to a function
// that might relocate the stack in order to grow or shrink it.
// A general rule is that the result of getcallersp should be used
// immediately and can only be passed to nosplit functions.
//
//go:noescape
func getcallerpc(argp unsafe.Pointer) uintptr

//go:noescape
func getcallersp(argp unsafe.Pointer) uintptr

// argp used in Defer structs when there is no argp.
const _NoArgs = ^uintptr(0)

//go:linkname time_now time.now
func time_now() (sec int64, nsec int32)

//extern __builtin_prefetch
func prefetch(addr unsafe.Pointer, rw int32, locality int32)

func prefetcht0(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 3)
}

func prefetcht1(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 2)
}

func prefetcht2(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 1)
}

func prefetchnta(addr uintptr) {
	prefetch(unsafe.Pointer(addr), 0, 0)
}
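
// Illustrative sketch, not part of the original file: prefetch hints are
// usually issued a fixed distance ahead of the element currently being
// processed. sumWithPrefetch is a hypothetical helper added only to show
// the pattern; the distance of 8 elements is arbitrary.
func sumWithPrefetch(xs []uint64) uint64 {
	var s uint64
	for i := range xs {
		if i+8 < len(xs) {
			prefetcht0(uintptr(unsafe.Pointer(&xs[i+8])))
		}
		s += xs[i]
	}
	return s
}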

// For gccgo, expose this for C callers.
//go:linkname unixnanotime runtime.unixnanotime
func unixnanotime() int64 {
	sec, nsec := time_now()
	return sec*1e9 + int64(nsec)
}

// round n up to a multiple of a. a must be a power of 2.
func round(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}
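
// Illustrative sketch, not part of the original file: round is how sizes
// and addresses get aligned, e.g. round(13, 8) == 16 and round(16, 8) == 16.
// alignUp8 is a hypothetical helper added only to show the pattern.
func alignUp8(n uintptr) uintptr {
	return round(n, 8)
}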

// checkASM returns whether assembly runtime checks have passed.
func checkASM() bool {
	return true
}

func eqstring(x, y string) bool {
	a := stringStructOf(&x)
	b := stringStructOf(&y)
	if a.len != b.len {
		return false
	}
	if a.str == b.str {
		return true
	}
	return memequal(a.str, b.str, uintptr(a.len))
}
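
// Illustrative sketch, not part of the original file: eqstring compares two
// strings for equality and must agree with the == operator; the fast paths
// above skip memequal when the lengths differ or the data pointers match.
// eqstringMatchesOperator is a hypothetical helper stating that contract.
func eqstringMatchesOperator(x, y string) bool {
	return eqstring(x, y) == (x == y)
}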

// For gccgo this is in the C code.
func osyield()

// For gccgo this can be called directly.
//extern syscall
func syscall(trap uintptr, a1, a2, a3, a4, a5, a6 uintptr) uintptr

// For gccgo, to communicate from the C code to the Go code.
//go:linkname setIsCgo runtime.setIsCgo
func setIsCgo() {
	iscgo = true
}

// For gccgo, to communicate from the C code to the Go code.
//go:linkname setCpuidECX runtime.setCpuidECX
func setCpuidECX(v uint32) {
	cpuid_ecx = v
}

// For gccgo, to communicate from the C code to the Go code.
//go:linkname setSupportAES runtime.setSupportAES
func setSupportAES(v bool) {
	support_aes = v
}

// Here for gccgo until we port atomic_pointer.go and mgc.go.
//go:nosplit
func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
	if !atomic.Casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new) {
		return false
	}
	return true
}
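
// Illustrative sketch, not part of the original file: casp is the usual
// building block for lock-free pointer updates, retrying until the swap
// succeeds. listNode, listHead, and pushNode are hypothetical, added only
// to show the pattern.
type listNode struct {
	next unsafe.Pointer // *listNode
	val  int
}

var listHead unsafe.Pointer // *listNode

func pushNode(n *listNode) {
	for {
		old := atomic.Loadp(unsafe.Pointer(&listHead))
		n.next = old
		if casp(&listHead, old, unsafe.Pointer(n)) {
			return
		}
	}
}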

// Here for gccgo until we port lock_*.go.
func lock(l *mutex)
func unlock(l *mutex)

// Here for gccgo.
func errno() int

// Temporary for gccgo until we port proc.go.
func entersyscall(int32)
func entersyscallblock(int32)

// Here for gccgo until we port mgc.go.
func GC()

// For gccgo to call from C code.
//go:linkname acquireWorldsema runtime.acquireWorldsema
func acquireWorldsema() {
	semacquire(&worldsema, 0)
}

// For gccgo to call from C code.
//go:linkname releaseWorldsema runtime.releaseWorldsema
func releaseWorldsema() {
	semrelease(&worldsema)
}

// For gccgo to call from C code, so that the C code and the Go code
// can share the memstats variable for now.
//go:linkname getMstats runtime.getMstats
func getMstats() *mstats {
	return &memstats
}

// Temporary for gccgo until we port mem_GOOS.go.
func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer
func sysFree(v unsafe.Pointer, n uintptr, sysStat *uint64)

// Temporary for gccgo until we port malloc.go
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer

// Temporary for gccgo until we port mheap.go
func setprofilebucket(p unsafe.Pointer, b *bucket)

// Temporary for gccgo until we port atomic_pointer.go.
//go:nosplit
func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) {
	atomic.StorepNoWB(noescape(ptr), new)
}

// Get signal trampoline, written in C.
func getSigtramp() uintptr

// The sa_handler field is generally hidden in a union, so use C accessors.
func getSigactionHandler(*_sigaction) uintptr
func setSigactionHandler(*_sigaction, uintptr)

// Retrieve fields from the siginfo_t and ucontext_t pointers passed
// to a signal handler using C, as they are often hidden in a union.
// Returns the signal address and, if available, the PC where the
// signal occurred.
func getSiginfo(*_siginfo_t, unsafe.Pointer) (sigaddr uintptr, sigpc uintptr)

// Implemented in C for gccgo.
func dumpregs(*_siginfo_t, unsafe.Pointer)

// Temporary for gccgo until we port proc.go.
//go:linkname getsched runtime.getsched
func getsched() *schedt {
	return &sched
}

// Temporary for gccgo until we port proc.go.
//go:linkname getCgoHasExtraM runtime.getCgoHasExtraM
func getCgoHasExtraM() *bool {
	return &cgoHasExtraM
}

// Temporary for gccgo until we port proc.go.
//go:linkname getAllP runtime.getAllP
func getAllP() **p {
	return &allp[0]
}

// Temporary for gccgo until we port proc.go.
//go:linkname allocg runtime.allocg
func allocg() *g {
	return new(g)
}

// Temporary for gccgo until we port the garbage collector.
//go:linkname getallglen runtime.getallglen
func getallglen() uintptr {
	return allglen
}

// Temporary for gccgo until we port the garbage collector.
//go:linkname getallg runtime.getallg
func getallg(i int) *g {
	return allgs[i]
}

// Temporary for gccgo until we port the garbage collector.
//go:linkname getallm runtime.getallm
func getallm() *m {
	return allm
}

// Throw and rethrow an exception.
func throwException()
func rethrowException()

// Fetch the size and required alignment of the _Unwind_Exception type
// used by the stack unwinder.
func unwindExceptionSize() uintptr

// Temporary for gccgo until C code no longer needs it.
//go:nosplit
//go:linkname getPanicking runtime.getPanicking
func getPanicking() uint32 {
	return panicking
}

// Temporary for gccgo until we initialize ncpu in Go.
//go:linkname setncpu runtime.setncpu
func setncpu(n int32) {
	ncpu = n
}

// Temporary for gccgo until we reliably initialize physPageSize in Go.
//go:linkname setpagesize runtime.setpagesize
func setpagesize(s uintptr) {
	if physPageSize == 0 {
		physPageSize = s
	}
}

// Temporary for gccgo until we port more of proc.go.
func sigprofNonGoPC(pc uintptr) {
}

// Temporary for gccgo until we port mgc.go.
//go:linkname runtime_m0 runtime.runtime_m0
func runtime_m0() *m {
	return &m0
}

// Temporary for gccgo until we port mgc.go.
//go:linkname runtime_g0 runtime.runtime_g0
func runtime_g0() *g {
	return &g0
}

const uintptrMask = 1<<(8*sys.PtrSize) - 1

type bitvector struct {
	n        int32 // # of bits
	bytedata *uint8
}