runtime: copy runtime package time code from Go 1.7
Source: libgo/go/runtime/stubs.go (official-gcc.git)

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Should be a built-in for unsafe.Pointer?
//go:nosplit
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}
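
// For example (an illustrative sketch, not part of the original file),
// stepping to the i'th element of an untyped array whose elements are
// elemSize bytes wide:
//
//	p := add(base, i*elemSize)
//
// is the unchecked equivalent of &x[i]; base, i, and elemSize are
// hypothetical names.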

// getg returns the pointer to the current g.
// The compiler rewrites calls to this function into instructions
// that fetch the g directly (from TLS or from the dedicated register).
func getg() *g

// mcall switches from the g to the g0 stack and invokes fn(g),
// where g is the goroutine that made the call.
// mcall saves g's current PC/SP in g->sched so that it can be restored later.
// It is up to fn to arrange for that later execution, typically by recording
// g in a data structure, causing something to call ready(g) later.
// mcall returns to the original goroutine g later, when g has been rescheduled.
// fn must not return at all; typically it ends by calling schedule, to let the m
// run other goroutines.
//
// mcall can only be called from g stacks (not g0, not gsignal).
//
// This must NOT be go:noescape: if fn is a stack-allocated closure,
// fn puts g on a run queue, and g executes before fn returns, the
// closure will be invalidated while it is still executing.
func mcall(fn func(*g))

// systemstack runs fn on a system stack.
//
// It is common to use a func literal as the argument, in order
// to share inputs and outputs with the code around the call
// to systemstack:
//
//	... set up y ...
//	systemstack(func() {
//		x = bigcall(y)
//	})
//	... use x ...
//
// For the gc toolchain this permits running a function that requires
// additional stack space in a context where the stack cannot be
// split. For gccgo, however, stack splitting is not managed by the
// Go runtime. In effect, all stacks are system stacks. So this gccgo
// version just runs the function.
func systemstack(fn func()) {
	fn()
}

func badsystemstack() {
	throw("systemstack called from unexpected goroutine")
}

// memclr clears n bytes starting at ptr.
// in memclr_*.s
//go:noescape
func memclr(ptr unsafe.Pointer, n uintptr)

//go:linkname reflect_memclr reflect.memclr
func reflect_memclr(ptr unsafe.Pointer, n uintptr) {
	memclr(ptr, n)
}

// memmove copies n bytes from "from" to "to".
// in memmove_*.s
//go:noescape
func memmove(to, from unsafe.Pointer, n uintptr)

//go:linkname reflect_memmove reflect.memmove
func reflect_memmove(to, from unsafe.Pointer, n uintptr) {
	memmove(to, from, n)
}

// exported value for testing
var hashLoad = loadFactor

// in asm_*.s
func fastrand1() uint32

// in asm_*.s
//go:noescape
func memequal(a, b unsafe.Pointer, size uintptr) bool

// noescape hides a pointer from escape analysis. noescape is
// the identity function but escape analysis doesn't think the
// output depends on the input. noescape is inlined and currently
// compiles down to a single xor instruction.
// USE CAREFULLY!
//go:nosplit
func noescape(p unsafe.Pointer) unsafe.Pointer {
	x := uintptr(p)
	return unsafe.Pointer(x ^ 0)
}
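
// For example (an illustrative sketch, not part of the original file),
// a function that only reads through its pointer argument for the
// duration of the call can hide it from escape analysis so the caller's
// value can stay on the stack:
//
//	func peek(p *byte) byte {
//		return *(*byte)(noescape(unsafe.Pointer(p)))
//	}
//
// peek is a hypothetical helper; if the callee actually retained the
// pointer, hiding it this way would break garbage collection, hence the
// USE CAREFULLY warning above.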

func mincore(addr unsafe.Pointer, n uintptr, dst *byte) int32

//go:noescape
func jmpdefer(fv *funcval, argp uintptr)
func exit1(code int32)
func asminit()
func setg(gg *g)
func breakpoint()

// reflectcall calls fn with a copy of the n argument bytes pointed at by arg.
// After fn returns, reflectcall copies n-retoffset result bytes
// back into arg+retoffset before returning. If copying result bytes back,
// the caller should pass the argument frame type as argtype, so that
// call can execute appropriate write barriers during the copy.
// Package reflect passes a frame type. In package runtime, there is only
// one call that copies results back, in cgocallbackg1, and it does NOT pass a
// frame type, meaning there are no write barriers invoked. See that call
// site for justification.
func reflectcall(argtype *_type, fn, arg unsafe.Pointer, argsize uint32, retoffset uint32)

func procyield(cycles uint32)

type neverCallThisFunction struct{}

// goexit is the return stub at the top of every goroutine call stack.
// Each goroutine stack is constructed as if goexit called the
// goroutine's entry point function, so that when the entry point
// function returns, it will return to goexit, which will call goexit1
// to perform the actual exit.
//
// This function must never be called directly. Call goexit1 instead.
// gentraceback assumes that goexit terminates the stack. A direct
// call on the stack will cause gentraceback to stop walking the stack
// prematurely and if there are leftover stack barriers it may panic.
func goexit(neverCallThisFunction)

// publicationBarrier performs a store/store barrier (a "publication"
// or "export" barrier). Some form of synchronization is required
// between initializing an object and making that object accessible to
// another processor. Without synchronization, the initialization
// writes and the "publication" write may be reordered, allowing the
// other processor to follow the pointer and observe an uninitialized
// object. In general, higher-level synchronization should be used,
// such as locking or an atomic pointer write. publicationBarrier is
// for when those aren't an option, such as in the implementation of
// the memory manager.
//
// There's no corresponding barrier for the read side because the read
// side naturally has a data dependency order. All architectures that
// Go supports or seems likely to ever support automatically enforce
// data dependency ordering.
func publicationBarrier()
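
// For example (an illustrative sketch, not part of the original file;
// T, size, typ, value, and sharedPtr are hypothetical names):
//
//	obj := (*T)(mallocgc(size, typ, true))
//	obj.field = value     // initialization writes
//	publicationBarrier()  // order the writes above before the write below
//	sharedPtr = obj       // publish: other processors may now see obj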

//go:noescape
func setcallerpc(argp unsafe.Pointer, pc uintptr)

// getcallerpc returns the program counter (PC) of its caller's caller.
// getcallersp returns the stack pointer (SP) of its caller's caller.
// For both, the argp must be a pointer to the caller's first function argument.
// The implementation may or may not use argp, depending on
// the architecture.
//
// For example:
//
//	func f(arg1, arg2, arg3 int) {
//		pc := getcallerpc(unsafe.Pointer(&arg1))
//		sp := getcallersp(unsafe.Pointer(&arg1))
//	}
//
// These two lines find the PC and SP immediately following
// the call to f (where f will return).
//
// The call to getcallerpc and getcallersp must be done in the
// frame being asked about. It would not be correct for f to pass &arg1
// to another function g and let g call getcallerpc/getcallersp.
// The call inside g might return information about g's caller or
// information about f's caller or complete garbage.
//
// The result of getcallersp is correct at the time of the return,
// but it may be invalidated by any subsequent call to a function
// that might relocate the stack in order to grow or shrink it.
// A general rule is that the result of getcallersp should be used
// immediately and can only be passed to nosplit functions.

//go:noescape
func getcallerpc(argp unsafe.Pointer) uintptr

//go:noescape
func getcallersp(argp unsafe.Pointer) uintptr

// argp used in Defer structs when there is no argp.
const _NoArgs = ^uintptr(0)

//go:linkname time_now time.now
func time_now() (sec int64, nsec int32)

// For gccgo, expose this for C callers.
//go:linkname unixnanotime runtime.unixnanotime
func unixnanotime() int64 {
	sec, nsec := time_now()
	return sec*1e9 + int64(nsec)
}

// round n up to a multiple of a. a must be a power of 2.
func round(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}
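
// For example, round(13, 8) == 16 and round(16, 8) == 16: adding a-1
// pushes n past the next boundary unless it is already on one, and
// &^ (a-1) then clears the low bits.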

// checkASM returns whether assembly runtime checks have passed.
func checkASM() bool {
	return true
}

// For gccgo this is in the C code.
func osyield()

// For gccgo this can be called directly.
//extern syscall
func syscall(trap uintptr, a1, a2, a3, a4, a5, a6 uintptr) uintptr

// throw crashes the program.
// For gccgo unless and until we port panic.go.
func throw(string)

// newobject allocates a new object.
// For gccgo unless and until we port malloc.go.
func newobject(*_type) unsafe.Pointer

// newarray allocates a new array of objects.
// For gccgo unless and until we port malloc.go.
func newarray(*_type, int) unsafe.Pointer

// funcPC returns the entry PC of the function f.
// It assumes that f is a func value. Otherwise the behavior is undefined.
// For gccgo here unless and until we port proc.go.
//go:nosplit
func funcPC(f interface{}) uintptr {
	return **(**uintptr)(add(unsafe.Pointer(&f), sys.PtrSize))
}
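
// The double indirection above relies on the representation of an
// interface value: its data word points at the func value, whose first
// word is the entry PC. For example (illustrative, not part of the
// original file):
//
//	pc := funcPC(checkASM)
//
// yields the address of checkASM's first instruction.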

// typedmemmove copies a typed value.
// For gccgo for now.
//go:nosplit
func typedmemmove(typ *_type, dst, src unsafe.Pointer) {
	memmove(dst, src, typ.size)
}

// Here for gccgo unless and until we port slice.go.
type slice struct {
	array unsafe.Pointer
	len   int
	cap   int
}
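
// For example (an illustrative sketch, not part of the original file),
// the runtime can inspect an ordinary slice through this header:
//
//	b := make([]byte, 4, 8)
//	hdr := (*slice)(unsafe.Pointer(&b))
//	// hdr.len == 4, hdr.cap == 8, hdr.array points at the backing store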

// Here for gccgo until we port malloc.go.
const (
	_64bit              = 1 << (^uintptr(0) >> 63) / 2
	_MHeapMap_TotalBits = (_64bit*sys.GoosWindows)*35 + (_64bit*(1-sys.GoosWindows)*(1-sys.GoosDarwin*sys.GoarchArm64))*39 + sys.GoosDarwin*sys.GoarchArm64*31 + (1-_64bit)*32
	_MaxMem             = uintptr(1<<_MHeapMap_TotalBits - 1)
)

// Here for gccgo until we port malloc.go.
//extern runtime_mallocgc
func c_mallocgc(size uintptr, typ uintptr, flag uint32) unsafe.Pointer
func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
	flag := uint32(0)
	if !needzero {
		flag = 1 << 3
	}
	return c_mallocgc(size, uintptr(unsafe.Pointer(typ)), flag)
}

// Here for gccgo until we port mgc.go.
var writeBarrier struct {
	enabled bool   // compiler emits a check of this before calling write barrier
	needed  bool   // whether we need a write barrier for current GC phase
	cgo     bool   // whether we need a write barrier for a cgo check
	alignme uint64 // guarantee alignment so that compiler can use a 32 or 64-bit load
}

// Here for gccgo until we port atomic_pointer.go and mgc.go.
//go:nosplit
func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool {
	if !atomic.Casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new) {
		return false
	}
	return true
}
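
// For example (an illustrative sketch, not part of the original file;
// slot and newVal are hypothetical unsafe.Pointer variables), installing
// a value only if no other processor has installed one first:
//
//	if casp(&slot, nil, newVal) {
//		// slot now points at newVal
//	}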

// Here for gccgo until we port lock_*.go.
func lock(l *mutex)
func unlock(l *mutex)

// Here for gccgo for Solaris.
func errno() int

// Temporary for gccgo until we port proc.go.
func entersyscall(int32)
func entersyscallblock(int32)
func exitsyscall(int32)
func gopark(func(*g, unsafe.Pointer) bool, unsafe.Pointer, string, byte, int)
func goparkunlock(*mutex, string, byte, int)
func goready(*g, int)

// Temporary hack for gccgo until we port proc.go.
//go:nosplit
func acquireSudog() *sudog {
	mp := acquirem()
	pp := mp.p.ptr()
	if len(pp.sudogcache) == 0 {
		pp.sudogcache = append(pp.sudogcache, new(sudog))
	}
	n := len(pp.sudogcache)
	s := pp.sudogcache[n-1]
	pp.sudogcache[n-1] = nil
	pp.sudogcache = pp.sudogcache[:n-1]
	if s.elem != nil {
		throw("acquireSudog: found s.elem != nil in cache")
	}
	releasem(mp)
	return s
}

// Temporary hack for gccgo until we port proc.go.
//go:nosplit
func releaseSudog(s *sudog) {
	if s.elem != nil {
		throw("runtime: sudog with non-nil elem")
	}
	if s.selectdone != nil {
		throw("runtime: sudog with non-nil selectdone")
	}
	if s.next != nil {
		throw("runtime: sudog with non-nil next")
	}
	if s.prev != nil {
		throw("runtime: sudog with non-nil prev")
	}
	if s.waitlink != nil {
		throw("runtime: sudog with non-nil waitlink")
	}
	if s.c != nil {
		throw("runtime: sudog with non-nil c")
	}
	gp := getg()
	if gp.param != nil {
		throw("runtime: releaseSudog with non-nil gp.param")
	}
	mp := acquirem() // avoid rescheduling to another P
	pp := mp.p.ptr()
	pp.sudogcache = append(pp.sudogcache, s)
	releasem(mp)
}
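
// For example (an illustrative sketch, not part of the original file),
// blocking code pairs the two calls around a wait, clearing the fields
// it used before returning the sudog to the per-P cache:
//
//	s := acquireSudog()
//	s.elem = unsafe.Pointer(&v) // v is a hypothetical value being passed
//	... block until another goroutine hands off through s ...
//	s.elem = nil
//	releaseSudog(s)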

// Temporary hack for gccgo until we port the garbage collector.
func typeBitsBulkBarrier(typ *_type, p, size uintptr) {}

// Here for gccgo until we port msize.go.
func roundupsize(uintptr) uintptr

// Here for gccgo until we port mgc.go.
func GC()

// Here for gccgo until we port proc.go.
var worldsema uint32 = 1

func stopTheWorldWithSema()
func startTheWorldWithSema()

// For gccgo to call from C code.
//go:linkname acquireWorldsema runtime.acquireWorldsema
func acquireWorldsema() {
	semacquire(&worldsema, false)
}

// For gccgo to call from C code.
//go:linkname releaseWorldsema runtime.releaseWorldsema
func releaseWorldsema() {
	semrelease(&worldsema)
}

// Here for gccgo until we port proc.go.
func stopTheWorld(reason string) {
	semacquire(&worldsema, false)
	getg().m.preemptoff = reason
	getg().m.gcing = 1
	systemstack(stopTheWorldWithSema)
}

// Here for gccgo until we port proc.go.
func startTheWorld() {
	getg().m.gcing = 0
	getg().m.locks++
	systemstack(startTheWorldWithSema)
	// worldsema must be held over startTheWorldWithSema to ensure
	// gomaxprocs cannot change while worldsema is held.
	semrelease(&worldsema)
	getg().m.preemptoff = ""
	getg().m.locks--
}
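
// For example (an illustrative sketch, not part of the original file),
// a caller that must observe a quiescent runtime brackets its work with
// the pair:
//
//	stopTheWorld("reason for stopping")
//	... inspect or update global state while no other goroutines run ...
//	startTheWorld()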

// For gccgo to call from C code, so that the C code and the Go code
// can share the memstats variable for now.
//go:linkname getMstats runtime.getMstats
func getMstats() *mstats {
	return &memstats
}

// Temporary for gccgo until we port proc.go.
func setcpuprofilerate_m(hz int32)

// Temporary for gccgo until we port mem_GOOS.go.
func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer

// Temporary for gccgo until we port proc.go, so that the C signal
// handler can call into cpuprof.
//go:linkname cpuprofAdd runtime.cpuprofAdd
func cpuprofAdd(stk []uintptr) {
	cpuprof.add(stk)
}

// For gccgo until we port proc.go.
func Breakpoint()
func LockOSThread()
func UnlockOSThread()
func allm() *m
func allgs() []*g

//go:nosplit
func readgstatus(gp *g) uint32 {
	return atomic.Load(&gp.atomicstatus)
}

// Temporary for gccgo until we port malloc.go
func persistentalloc(size, align uintptr, sysStat *uint64) unsafe.Pointer

// Temporary for gccgo until we port mheap.go
func setprofilebucket(p unsafe.Pointer, b *bucket)

// Currently in proc.c.
func tracebackothers(*g)