/* libgo/runtime/runtime.h  */
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "config.h"

#include "go-assert.h"
#include <complex.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <pthread.h>
#include <semaphore.h>
#include <ucontext.h>

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#include "interface.h"
#include "go-alloc.h"

#define _STRINGIFY2_(x) #x
#define _STRINGIFY_(x) _STRINGIFY2_(x)
#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)
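
/*
 * Editorial note (not in the original header): GOSYM_PREFIX stringifies the
 * target's __USER_LABEL_PREFIX__ ("" on ELF targets, "_" on targets that
 * prefix C symbols), so string concatenation yields the assembler name of a
 * Go symbol.  A minimal, hypothetical sketch of the pattern used throughout
 * this header:
 */
#if 0
void example_gofunc(void)
  __asm__ (GOSYM_PREFIX "example.GoFunc"); // binds to "example.GoFunc" (or "_example.GoFunc")
#endif
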
/* This file supports C files copied from the 6g runtime library.
   This is a version of the 6g runtime.h rewritten for gccgo's version
   of the code.  */

typedef signed int int8 __attribute__ ((mode (QI)));
typedef unsigned int uint8 __attribute__ ((mode (QI)));
typedef signed int int16 __attribute__ ((mode (HI)));
typedef unsigned int uint16 __attribute__ ((mode (HI)));
typedef signed int int32 __attribute__ ((mode (SI)));
typedef unsigned int uint32 __attribute__ ((mode (SI)));
typedef signed int int64 __attribute__ ((mode (DI)));
typedef unsigned int uint64 __attribute__ ((mode (DI)));
typedef float float32 __attribute__ ((mode (SF)));
typedef double float64 __attribute__ ((mode (DF)));
typedef signed int intptr __attribute__ ((mode (pointer)));
typedef unsigned int uintptr __attribute__ ((mode (pointer)));

typedef intptr intgo;   // Go's int
typedef uintptr uintgo; // Go's uint

typedef uintptr uintreg;

/* Defined types.  */

typedef uint8 bool;
typedef uint8 byte;
typedef struct Func Func;
typedef struct G G;
typedef struct Lock Lock;
typedef struct M M;
typedef struct P P;
typedef struct Note Note;
typedef struct String String;
typedef struct FuncVal FuncVal;
typedef struct SigTab SigTab;
typedef struct MCache MCache;
typedef struct FixAlloc FixAlloc;
typedef struct Hchan Hchan;
typedef struct Timers Timers;
typedef struct Timer Timer;
typedef struct GCStats GCStats;
typedef struct LFNode LFNode;
typedef struct ParFor ParFor;
typedef struct ParForThread ParForThread;
typedef struct CgoMal CgoMal;
typedef struct PollDesc PollDesc;
typedef struct DebugVars DebugVars;

typedef struct __go_open_array Slice;
typedef struct __go_interface Iface;
typedef struct __go_empty_interface Eface;
typedef struct __go_type_descriptor Type;
typedef struct __go_defer_stack Defer;
typedef struct __go_panic_stack Panic;

typedef struct __go_ptr_type PtrType;
typedef struct __go_func_type FuncType;
typedef struct __go_interface_type InterfaceType;
typedef struct __go_map_type MapType;
typedef struct __go_channel_type ChanType;

typedef struct Traceback Traceback;

typedef struct Location Location;

/*
 * Per-CPU declaration.
 */
extern M* runtime_m(void);
extern G* runtime_g(void);

extern M runtime_m0;
extern G runtime_g0;

/*
 * defined constants
 */
enum
{
        // G status
        //
        // If you add to this list, add to the list
        // of "okay during garbage collection" status
        // in mgc0.c too.
        Gidle,
        Grunnable,
        Grunning,
        Gsyscall,
        Gwaiting,
        Gmoribund_unused,  // currently unused, but hardcoded in gdb scripts
        Gdead,
};
enum
{
        // P status
        Pidle,
        Prunning,
        Psyscall,
        Pgcstop,
        Pdead,
};
enum
{
        true = 1,
        false = 0,
};
enum
{
        PtrSize = sizeof(void*),
};
enum
{
        // Per-M stack segment cache size.
        StackCacheSize = 32,
        // Global <-> per-M stack segment cache transfer batch size.
        StackCacheBatch = 16,
};
/*
 * structures
 */
struct Lock
{
        // Futex-based impl treats it as uint32 key,
        // while sema-based impl as M* waitm.
        // Used to be a union, but unions break precise GC.
        uintptr key;
};
struct Note
{
        // Futex-based impl treats it as uint32 key,
        // while sema-based impl as M* waitm.
        // Used to be a union, but unions break precise GC.
        uintptr key;
};
struct String
{
        const byte* str;
        intgo len;
};
struct FuncVal
{
        void (*fn)(void);
        // variable-size, fn-specific data here
};
struct GCStats
{
        // the struct must consist of only uint64's,
        // because it is cast to uint64[].
        uint64 nhandoff;
        uint64 nhandoffcnt;
        uint64 nprocyield;
        uint64 nosyield;
        uint64 nsleep;
};

// A location in the program, used for backtraces.
struct Location
{
        uintptr pc;
        String filename;
        String function;
        intgo lineno;
};

struct G
{
        Defer* defer;
        Panic* panic;
        void* exception;    // current exception being thrown
        bool is_foreign;    // whether current exception is from another language
        void *gcstack;      // if status==Gsyscall, gcstack = stackbase to use during gc
        uintptr gcstack_size;
        void* gcnext_segment;
        void* gcnext_sp;
        void* gcinitial_sp;
        ucontext_t gcregs;
        byte* entry;        // initial function
        void* param;        // passed parameter on wakeup
        bool fromgogo;      // reached from gogo
        int16 status;
        uint32 selgen;      // valid sudog pointer
        int64 goid;
        int64 waitsince;    // approx time when the G became blocked
        const char* waitreason; // if status==Gwaiting
        G* schedlink;
        bool ispanic;
        bool issystem;      // do not output in stack dump
        bool isbackground;  // ignore in deadlock detector
        bool paniconfault;  // panic (instead of crash) on unexpected fault address
        M* m;               // for debuggers, but offset not hard-coded
        M* lockedm;
        int32 sig;
        int32 writenbuf;
        byte* writebuf;
        uintptr sigcode0;
        uintptr sigcode1;
        // uintptr sigpc;
        uintptr gopc;       // pc of go statement that created this goroutine

        int32 ncgo;
        CgoMal* cgomal;

        Traceback* traceback;

        ucontext_t context;
        void* stack_context[10];
};

struct M
{
        G* g0;              // goroutine with scheduling stack
        G* gsignal;         // signal-handling G
        byte* gsignalstack;
        size_t gsignalstacksize;
        void (*mstartfn)(void);
        G* curg;            // current running goroutine
        G* caughtsig;       // goroutine running during fatal signal
        P* p;               // attached P for executing Go code (nil if not executing Go code)
        P* nextp;
        int32 id;
        int32 mallocing;
        int32 throwing;
        int32 gcing;
        int32 locks;
        int32 softfloat;
        int32 dying;
        int32 profilehz;
        int32 helpgc;
        bool spinning;      // M is out of work and is actively looking for work
        bool blocked;       // M is blocked on a Note
        uint32 fastrand;
        uint64 ncgocall;    // number of cgo calls in total
        int32 ncgo;         // number of cgo calls currently in progress
        CgoMal* cgomal;
        Note park;
        M* alllink;         // on allm
        M* schedlink;
        MCache *mcache;
        G* lockedg;
        Location createstack[32]; // Stack that created this thread.
        uint32 locked;      // tracking for LockOSThread
        M* nextwaitm;       // next M waiting for lock
        uintptr waitsema;   // semaphore for parking on locks
        uint32 waitsemacount;
        uint32 waitsemalock;
        GCStats gcstats;
        bool needextram;
        bool dropextram;    // for gccgo: drop after call is done.
        uint8 traceback;
        bool (*waitunlockf)(G*, void*);
        void* waitlock;
        uintptr end[];
};

struct P
{
        Lock;

        int32 id;
        uint32 status;      // one of Pidle/Prunning/...
        P* link;
        uint32 schedtick;   // incremented on every scheduler call
        uint32 syscalltick; // incremented on every system call
        M* m;               // back-link to associated M (nil if idle)
        MCache* mcache;
        Defer* deferpool;   // pool of available Defer structs (see panic.c)

        // Cache of goroutine ids, amortizes accesses to runtime_sched.goidgen.
        uint64 goidcache;
        uint64 goidcacheend;

        // Queue of runnable goroutines.
        uint32 runqhead;
        uint32 runqtail;
        G* runq[256];

        // Available G's (status == Gdead)
        G* gfree;
        int32 gfreecnt;

        byte pad[64];
};

// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
// The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
// External locks are not recursive; a second lock is silently ignored.
// The upper bits of m->locked record the nesting depth of calls to lockOSThread
// (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
// Internal locks can be recursive.  For instance, a lock for cgo can occur while the main
// goroutine is holding the lock during the initialization phase.
enum
{
        LockExternal = 1,
        LockInternal = 2,
};
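
/*
 * Editorial sketch (hypothetical helpers, not part of the runtime): how the
 * m->locked encoding above combines the external flag with the internal
 * nesting count.
 */
#if 0
static void
example_lock_internal(M *mp)
{
        mp->locked += LockInternal;  // nesting depth lives in the upper bits
}

static void
example_unlock_internal(M *mp)
{
        mp->locked -= LockInternal;
}

static bool
example_locked_externally(M *mp)
{
        return (mp->locked & LockExternal) != 0;  // low bit set by LockOSThread
}
#endif
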
struct SigTab
{
        int32 sig;
        int32 flags;
};
enum
{
        SigNotify = 1<<0,   // let signal.Notify have signal, even if from kernel
        SigKill = 1<<1,     // if signal.Notify doesn't take it, exit quietly
        SigThrow = 1<<2,    // if signal.Notify doesn't take it, exit loudly
        SigPanic = 1<<3,    // if the signal is from the kernel, panic
        SigDefault = 1<<4,  // if the signal isn't explicitly requested, don't monitor it
        SigHandling = 1<<5, // our signal handler is registered
        SigIgnored = 1<<6,  // the signal was ignored before we registered for it
        SigGoExit = 1<<7,   // cause all runtime procs to exit (only used on Plan 9).
};

// Layout of in-memory per-function information prepared by linker
// See http://golang.org/s/go12symtab.
// Keep in sync with linker and with ../../libmach/sym.c
// and with package debug/gosym.
struct Func
{
        String name;
        uintptr entry;      // entry pc
};

#ifdef GOOS_nacl
enum {
        NaCl = 1,
};
#else
enum {
        NaCl = 0,
};
#endif

#ifdef GOOS_windows
enum {
        Windows = 1
};
#else
enum {
        Windows = 0
};
#endif
#ifdef GOOS_solaris
enum {
        Solaris = 1
};
#else
enum {
        Solaris = 0
};
#endif

struct Timers
{
        Lock;
        G *timerproc;
        bool sleeping;
        bool rescheduling;
        Note waitnote;
        Timer **t;
        int32 len;
        int32 cap;
};

// Package time knows the layout of this structure.
// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
// For GOOS=nacl, package syscall knows the layout of this structure.
// If this struct changes, adjust ../syscall/net_nacl.go:/runtimeTimer.
struct Timer
{
        intgo i;            // heap index

        // Timer wakes up at when, and then at when+period, ... (period > 0 only)
        // each time calling f(now, arg) in the timer goroutine, so f must be
        // a well-behaved function and not block.
        int64 when;
        int64 period;
        FuncVal *fv;
        Eface arg;
        uintptr seq;
};
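
/*
 * Editorial sketch: arming a periodic timer with the declarations in this
 * header, assuming a callback matching the f(now, arg) description above.
 * The names, the FuncVal wrapping, and the exact callback signature are
 * hypothetical; real callers build the FuncVal the way gccgo builds closures.
 */
#if 0
static void example_tick(int64 now, Eface arg);  // must not block

static void
example_start_timer(Timer *t, FuncVal *fv)
{
        t->when = runtime_nanotime() + 1000000000LL; // first firing in one second
        t->period = 1000000000LL;                    // then once per second
        t->fv = fv;                                  // fv->fn is assumed to wrap example_tick
        runtime_addtimer(t);
}
#endif
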
// Lock-free stack node.
struct LFNode
{
        LFNode *next;
        uintptr pushcnt;
};

// Parallel for descriptor.
struct ParFor
{
        void (*body)(ParFor*, uint32); // executed for each element
        uint32 done;        // number of idle threads
        uint32 nthr;        // total number of threads
        uint32 nthrmax;     // maximum number of threads
        uint32 thrseq;      // thread id sequencer
        uint32 cnt;         // iteration space [0, cnt)
        void *ctx;          // arbitrary user context
        bool wait;          // if true, wait while all threads finish processing,
                            // otherwise parfor may return while other threads are still working
        ParForThread *thr;  // array of thread descriptors
        uint32 pad;         // to align ParForThread.pos for 64-bit atomic operations
        // stats
        uint64 nsteal;
        uint64 nstealcnt;
        uint64 nprocyield;
        uint64 nosyield;
        uint64 nsleep;
};

// Track memory allocated by code not written in Go during a cgo call,
// so that the garbage collector can see it.
struct CgoMal
{
        CgoMal *next;
        void *alloc;
};

// Holds variables parsed from the GODEBUG environment variable.
struct DebugVars
{
        int32 allocfreetrace;
        int32 efence;
        int32 gctrace;
        int32 gcdead;
        int32 scheddetail;
        int32 schedtrace;
};

extern bool runtime_precisestack;
extern bool runtime_copystack;

/*
 * defined macros
 *    you need super-gopher-guru privilege
 *    to add to this list.
 */
#define nelem(x) (sizeof(x)/sizeof((x)[0]))
#define nil ((void*)0)
#define USED(v) ((void) v)
#define ROUND(x, n) (((x)+(n)-1)&~(uintptr)((n)-1)) /* all-caps to mark as macro: it evaluates n twice */
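
/*
 * Editorial sketch: ROUND rounds x up to a multiple of n, where n must be a
 * power of two; note that n is evaluated twice, so it must be side-effect free.
 */
#if 0
static uintptr
example_round(void)
{
        return ROUND(13, 8);  // (13+7) & ~7 == 16
}
#endif
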
byte* runtime_startup_random_data;
uint32 runtime_startup_random_data_len;
void runtime_get_random_data(byte**, int32*);

enum {
        // hashinit wants this many random bytes
        HashRandomBytes = 32
};
void runtime_hashinit(void);

void runtime_traceback(void);
void runtime_tracebackothers(G*);
enum
{
        // The maximum number of frames we print for a traceback
        TracebackMaxFrames = 100,
};

/*
 * external data
 */
extern uintptr runtime_zerobase;
extern G** runtime_allg;
extern uintptr runtime_allglen;
extern G* runtime_lastg;
extern M* runtime_allm;
extern P** runtime_allp;
extern int32 runtime_gomaxprocs;
extern uint32 runtime_needextram;
extern uint32 runtime_panicking;
extern int8* runtime_goos;
extern int32 runtime_ncpu;
extern void (*runtime_sysargs)(int32, uint8**);
extern uint32 runtime_Hchansize;
extern DebugVars runtime_debug;
extern uintptr runtime_maxstacksize;

extern bool runtime_isstarted;
extern bool runtime_isarchive;

/*
 * common functions and data
 */
#define runtime_strcmp(s1, s2) __builtin_strcmp((s1), (s2))
#define runtime_strncmp(s1, s2, n) __builtin_strncmp((s1), (s2), (n))
#define runtime_strstr(s1, s2) __builtin_strstr((s1), (s2))
intgo runtime_findnull(const byte*);
intgo runtime_findnullw(const uint16*);
void runtime_dump(byte*, int32);

void runtime_gogo(G*);
struct __go_func_type;
void runtime_args(int32, byte**);
void runtime_osinit();
void runtime_goargs(void);
void runtime_goenvs(void);
void runtime_goenvs_unix(void);
void runtime_throw(const char*) __attribute__ ((noreturn));
void runtime_panicstring(const char*) __attribute__ ((noreturn));
bool runtime_canpanic(G*);
void runtime_prints(const char*);
void runtime_printf(const char*, ...);
int32 runtime_snprintf(byte*, int32, const char*, ...);
#define runtime_mcmp(a, b, s) __builtin_memcmp((a), (b), (s))
#define runtime_memmove(a, b, s) __builtin_memmove((a), (b), (s))
void* runtime_mal(uintptr);
String runtime_gostring(const byte*);
String runtime_gostringnocopy(const byte*);
void runtime_schedinit(void);
void runtime_initsig(void);
void runtime_sigenable(uint32 sig);
void runtime_sigdisable(uint32 sig);
int32 runtime_gotraceback(bool *crash);
void runtime_goroutineheader(G*);
void runtime_printtrace(Location*, int32, bool);
#define runtime_open(p, f, m) open((p), (f), (m))
#define runtime_read(d, v, n) read((d), (v), (n))
#define runtime_write(d, v, n) write((d), (v), (n))
#define runtime_close(d) close(d)
void runtime_ready(G*);
const byte* runtime_getenv(const char*);
int32 runtime_atoi(const byte*);
void* runtime_mstart(void*);
G* runtime_malg(int32, byte**, size_t*);
void runtime_mpreinit(M*);
void runtime_minit(void);
void runtime_unminit(void);
void runtime_needm(void);
void runtime_dropm(void);
void runtime_signalstack(byte*, int32);
MCache* runtime_allocmcache(void);
void runtime_freemcache(MCache*);
void runtime_mallocinit(void);
void runtime_mprofinit(void);
#define runtime_malloc(s) __go_alloc(s)
#define runtime_free(p) __go_free(p)
#define runtime_getcallersp(p) __builtin_frame_address(1)
int32 runtime_mcount(void);
int32 runtime_gcount(void);
void runtime_mcall(void(*)(G*));
uint32 runtime_fastrand1(void);
int32 runtime_timediv(int64, int32, int32*);
int32 runtime_round2(int32 x); // round x up to a power of 2.

// atomic operations
#define runtime_cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
#define runtime_cas64(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
#define runtime_casp(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
// Don't confuse with the x86 XADD instruction:
// this one is actually 'addx', that is, add-and-fetch.
#define runtime_xadd(p, v) __sync_add_and_fetch (p, v)
#define runtime_xadd64(p, v) __sync_add_and_fetch (p, v)
#define runtime_xchg(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
#define runtime_xchg64(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
#define runtime_xchgp(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
#define runtime_atomicload(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
#define runtime_atomicstore(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
#define runtime_atomicstore64(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
#define runtime_atomicload64(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
#define runtime_atomicloadp(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
#define runtime_atomicstorep(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
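
/*
 * Editorial sketch (hypothetical names): a typical compare-and-swap retry
 * loop built from the wrappers above, and the equivalent add-and-fetch.
 */
#if 0
static uint32 example_counter;

static void
example_increment(void)
{
        uint32 old;

        do
                old = runtime_atomicload(&example_counter);
        while(!runtime_cas(&example_counter, old, old+1));
}

static uint32
example_increment_xadd(void)
{
        return runtime_xadd(&example_counter, 1);  // returns the new value
}
#endif
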
void runtime_setmg(M*, G*);
void runtime_newextram(void);
#define runtime_exit(s) exit(s)
#define runtime_breakpoint() __builtin_trap()
void runtime_gosched(void);
void runtime_gosched0(G*);
void runtime_schedtrace(bool);
void runtime_park(bool(*)(G*, void*), void*, const char*);
void runtime_parkunlock(Lock*, const char*);
void runtime_tsleep(int64, const char*);
M* runtime_newm(void);
void runtime_goexit(void);
void runtime_entersyscall(void) __asm__ (GOSYM_PREFIX "syscall.Entersyscall");
void runtime_entersyscallblock(void);
void runtime_exitsyscall(void) __asm__ (GOSYM_PREFIX "syscall.Exitsyscall");
G* __go_go(void (*pfn)(void*), void*);
void siginit(void);
bool __go_sigsend(int32 sig);
int32 runtime_callers(int32, Location*, int32, bool keep_callers);
int64 runtime_nanotime(void);     // monotonic time
int64 runtime_unixnanotime(void); // real time, can skip
void runtime_dopanic(int32) __attribute__ ((noreturn));
void runtime_startpanic(void);
void runtime_freezetheworld(void);
void runtime_unwindstack(G*, byte*);
void runtime_sigprof();
void runtime_resetcpuprofiler(int32);
void runtime_setcpuprofilerate(void(*)(uintptr*, int32), int32);
void runtime_usleep(uint32);
int64 runtime_cputicks(void);
int64 runtime_tickspersecond(void);
void runtime_blockevent(int64, int32);
extern int64 runtime_blockprofilerate;
void runtime_addtimer(Timer*);
bool runtime_deltimer(Timer*);
G* runtime_netpoll(bool);
void runtime_netpollinit(void);
int32 runtime_netpollopen(uintptr, PollDesc*);
int32 runtime_netpollclose(uintptr);
void runtime_netpollready(G**, PollDesc*, int32);
uintptr runtime_netpollfd(PollDesc*);
void runtime_netpollarm(PollDesc*, int32);
void** runtime_netpolluser(PollDesc*);
bool runtime_netpollclosing(PollDesc*);
void runtime_netpolllock(PollDesc*);
void runtime_netpollunlock(PollDesc*);
void runtime_crash(void);
void runtime_parsedebugvars(void);
void _rt0_go(void);
void* runtime_funcdata(Func*, int32);
int32 runtime_setmaxthreads(int32);
G* runtime_timejump(void);
void runtime_iterate_finq(void (*callback)(FuncVal*, void*, const FuncType*, const PtrType*));

void runtime_stoptheworld(void);
void runtime_starttheworld(void);
extern uint32 runtime_worldsema;

/*
 * mutual exclusion locks.  in the uncontended case,
 * as fast as spin locks (just a few user-level instructions),
 * but on the contention path they sleep in the kernel.
 * a zeroed Lock is unlocked (no need to initialize each lock).
 */
void runtime_lock(Lock*);
void runtime_unlock(Lock*);
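
/*
 * Editorial sketch (hypothetical names): since a zeroed Lock is unlocked, a
 * static Lock needs no initialization before use.
 */
#if 0
static Lock example_lock;        // zero value => unlocked
static int64 example_protected;

static void
example_bump(void)
{
        runtime_lock(&example_lock);
        example_protected++;
        runtime_unlock(&example_lock);
}
#endif
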
/*
 * sleep and wakeup on one-time events.
 * before any calls to notesleep or notewakeup,
 * must call noteclear to initialize the Note.
 * then, exactly one thread can call notesleep
 * and exactly one thread can call notewakeup (once).
 * once notewakeup has been called, the notesleep
 * will return.  future notesleep will return immediately.
 * subsequent noteclear must be called only after
 * previous notesleep has returned, e.g. it's disallowed
 * to call noteclear straight after notewakeup.
 *
 * notetsleep is like notesleep but wakes up after
 * a given number of nanoseconds even if the event
 * has not yet happened.  if a goroutine uses notetsleep to
 * wake up early, it must wait to call noteclear until it
 * can be sure that no other goroutine is calling
 * notewakeup.
 *
 * notesleep/notetsleep are generally called on g0,
 * notetsleepg is similar to notetsleep but is called on user g.
 */
void runtime_noteclear(Note*);
void runtime_notesleep(Note*);
void runtime_notewakeup(Note*);
bool runtime_notetsleep(Note*, int64);  // false - timeout
bool runtime_notetsleepg(Note*, int64); // false - timeout
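
/*
 * Editorial sketch (hypothetical names): the one-shot handshake described
 * above.  One thread clears the Note and sleeps; another wakes it at most once.
 */
#if 0
static Note example_done;

static void
example_waiter(void)
{
        runtime_noteclear(&example_done);  // initialize before anyone sleeps or wakes
        runtime_notesleep(&example_done);  // returns once example_signaler has run
}

static void
example_signaler(void)
{
        runtime_notewakeup(&example_done); // at most one wakeup per noteclear
}
#endif
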
/*
 * low-level synchronization for implementing the above
 */
uintptr runtime_semacreate(void);
int32 runtime_semasleep(int64);
void runtime_semawakeup(M*);
// or
void runtime_futexsleep(uint32*, uint32, int64);
void runtime_futexwakeup(uint32*, uint32);

/*
 * Lock-free stack.
 * Initialize uint64 head to 0, compare with 0 to test for emptiness.
 * The stack does not keep pointers to nodes,
 * so they can be garbage collected if there are no other pointers to nodes.
 */
void runtime_lfstackpush(uint64 *head, LFNode *node)
  __asm__ (GOSYM_PREFIX "runtime.lfstackpush");
LFNode* runtime_lfstackpop(uint64 *head);
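
/*
 * Editorial sketch (hypothetical embedding struct): pushing and popping on a
 * lock-free stack.  LFNode is placed first so the popped node can be cast
 * back to the containing item.
 */
#if 0
typedef struct ExampleItem ExampleItem;
struct ExampleItem
{
        LFNode node;  // must be the first field
        int32 value;
};

static uint64 example_lfstack;  // zero-initialized head == empty stack

static void
example_push(ExampleItem *item)
{
        runtime_lfstackpush(&example_lfstack, &item->node);
}

static ExampleItem*
example_pop(void)
{
        return (ExampleItem*)runtime_lfstackpop(&example_lfstack);  // nil if empty
}
#endif
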
/*
 * Parallel for over [0, n).
 * body() is executed for each iteration.
 * nthr - total number of worker threads.
 * ctx - arbitrary user context.
 * if wait=true, threads return from parfor() when all work is done;
 * otherwise, threads can return while other threads are still finishing processing.
 */
ParFor* runtime_parforalloc(uint32 nthrmax);
void runtime_parforsetup(ParFor *desc, uint32 nthr, uint32 n, void *ctx, bool wait, void (*body)(ParFor*, uint32));
void runtime_parfordo(ParFor *desc);
void runtime_parforiters(ParFor*, uintptr, uintptr*, uintptr*);
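
/*
 * Editorial sketch (hypothetical body and element count): the single-worker
 * degenerate case of the parallel for above.  With more workers, each of the
 * nthr threads would call runtime_parfordo(desc) on the same descriptor.
 */
#if 0
static void
example_body(ParFor *desc, uint32 i)
{
        // process element i; desc->ctx carries the user context
        USED(desc);
        USED(i);
}

static void
example_parfor(uint32 nelems)
{
        ParFor *desc;

        desc = runtime_parforalloc(1);
        runtime_parforsetup(desc, 1, nelems, nil, true, example_body);
        runtime_parfordo(desc);  // runs example_body for i in [0, nelems)
}
#endif
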
/*
 * low level C-called
 */
#define runtime_mmap mmap
#define runtime_munmap munmap
#define runtime_madvise madvise
#define runtime_memclr(buf, size) __builtin_memset((buf), 0, (size))
#define runtime_getcallerpc(p) __builtin_return_address(0)

#ifdef __rtems__
void __wrap_rtems_task_variable_add(void **);
#endif

/*
 * Names generated by gccgo.
 */
#define runtime_printbool __go_print_bool
#define runtime_printfloat __go_print_double
#define runtime_printint __go_print_int64
#define runtime_printiface __go_print_interface
#define runtime_printeface __go_print_empty_interface
#define runtime_printstring __go_print_string
#define runtime_printpointer __go_print_pointer
#define runtime_printuint __go_print_uint64
#define runtime_printslice __go_print_slice
#define runtime_printcomplex __go_print_complex

/*
 * runtime go-called
 */
void runtime_printbool(_Bool);
void runtime_printbyte(int8);
void runtime_printfloat(double);
void runtime_printint(int64);
void runtime_printiface(Iface);
void runtime_printeface(Eface);
void runtime_printstring(String);
void runtime_printpc(void*);
void runtime_printpointer(void*);
void runtime_printuint(uint64);
void runtime_printhex(uint64);
void runtime_printslice(Slice);
void runtime_printcomplex(complex double);
void reflect_call(const struct __go_func_type *, FuncVal *, _Bool, _Bool,
                  void **, void **)
  __asm__ (GOSYM_PREFIX "reflect.call");
#define runtime_panic __go_panic

/*
 * runtime c-called (but written in Go)
 */
void runtime_printany(Eface)
  __asm__ (GOSYM_PREFIX "runtime.Printany");
void runtime_newTypeAssertionError(const String*, const String*, const String*, const String*, Eface*)
  __asm__ (GOSYM_PREFIX "runtime.NewTypeAssertionError");
void runtime_newErrorCString(const char*, Eface*)
  __asm__ (GOSYM_PREFIX "runtime.NewErrorCString");

/*
 * wrapped for go users
 */
void runtime_semacquire(uint32 volatile *, bool);
void runtime_semrelease(uint32 volatile *);
int32 runtime_gomaxprocsfunc(int32 n);
void runtime_procyield(uint32);
void runtime_osyield(void);
void runtime_lockOSThread(void);
void runtime_unlockOSThread(void);
bool runtime_lockedOSThread(void);

bool runtime_showframe(String, bool);
void runtime_printcreatedby(G*);

uintptr runtime_memlimit(void);

#define ISNAN(f) __builtin_isnan(f)

enum
{
        UseSpanType = 1,
};

#define runtime_setitimer setitimer

void runtime_check(void);

// A list of global variables that the garbage collector must scan.
struct root_list {
        struct root_list *next;
        struct root {
                void *decl;
                size_t size;
        } roots[];
};

void __go_register_gc_roots(struct root_list*);
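
/*
 * Editorial sketch (hypothetical names; the nil-terminated roots[] layout is
 * an assumption about the convention used by gccgo-generated registration
 * code): registering a global so the collector scans it.  Statically
 * initializing the flexible array member relies on a GNU C extension.
 */
#if 0
static void *example_global;

static struct root_list example_roots = {
        nil,
        {
                { &example_global, sizeof(example_global) },
                { nil, 0 },  // end of list
        },
};

static void
example_register_roots(void)
{
        __go_register_gc_roots(&example_roots);
}
#endif
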
// Size of stack space allocated using Go's allocator.
// This will be 0 when using split stacks, as in that case
// the stacks are allocated by the splitstack library.
extern uintptr runtime_stacks_sys;

struct backtrace_state;
extern struct backtrace_state *__go_get_backtrace_state(void);
extern _Bool __go_file_line(uintptr, String*, String*, intgo *);
extern byte* runtime_progname();
extern void runtime_main(void*);
extern uint32 runtime_in_callers;

int32 getproccount(void);

#define PREFETCH(p) __builtin_prefetch(p)

bool runtime_gcwaiting(void);
void runtime_badsignal(int);
Defer* runtime_newdefer(void);
void runtime_freedefer(Defer*);

struct time_now_ret
{
        int64_t sec;
        int32_t nsec;
};

struct time_now_ret now() __asm__ (GOSYM_PREFIX "time.now")
  __attribute__ ((no_split_stack));

extern void _cgo_wait_runtime_init_done (void);
extern void _cgo_notify_runtime_init_done (void);
extern _Bool runtime_iscgo;
extern _Bool runtime_cgoHasExtraM;
extern Hchan *runtime_main_init_done;