// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include <sys/types.h>
#include <semaphore.h>

#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif

#include "interface.h"
#define _STRINGIFY2_(x) #x
#define _STRINGIFY_(x) _STRINGIFY2_(x)
#define GOSYM_PREFIX _STRINGIFY_(__USER_LABEL_PREFIX__)
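/*
 * Illustrative expansion (a sketch, not part of the original header): GCC
 * predefines __USER_LABEL_PREFIX__, which is empty on typical ELF targets
 * and "_" on e.g. Darwin and Windows.  So a declaration such as
 *
 *	void runtime_lfstackpush(uint64*, LFNode*)
 *	  __asm__ (GOSYM_PREFIX "runtime.lfstackpush");
 *
 * resolves to __asm__ ("runtime.lfstackpush") on ELF and to
 * __asm__ ("_runtime.lfstackpush") where user labels carry a leading
 * underscore, letting this C code bind to Go-mangled symbol names.
 */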
/* This file supports C files copied from the 6g runtime library.
   This is a version of the 6g runtime.h rewritten for gccgo's version
   of the code.  */
typedef signed int   int8    __attribute__ ((mode (QI)));
typedef unsigned int uint8   __attribute__ ((mode (QI)));
typedef signed int   int16   __attribute__ ((mode (HI)));
typedef unsigned int uint16  __attribute__ ((mode (HI)));
typedef signed int   int32   __attribute__ ((mode (SI)));
typedef unsigned int uint32  __attribute__ ((mode (SI)));
typedef signed int   int64   __attribute__ ((mode (DI)));
typedef unsigned int uint64  __attribute__ ((mode (DI)));
typedef float        float32 __attribute__ ((mode (SF)));
typedef double       float64 __attribute__ ((mode (DF)));
typedef signed int   intptr  __attribute__ ((mode (pointer)));
typedef unsigned int uintptr __attribute__ ((mode (pointer)));

typedef intptr	intgo;	// Go's int
typedef uintptr	uintgo;	// Go's uint

typedef uintptr	uintreg;
typedef struct Func		Func;
typedef struct Lock		Lock;
typedef struct Note		Note;
typedef struct String		String;
typedef struct FuncVal		FuncVal;
typedef struct SigTab		SigTab;
typedef struct MCache		MCache;
typedef struct FixAlloc	FixAlloc;
typedef struct Hchan		Hchan;
typedef struct Timers		Timers;
typedef struct Timer		Timer;
typedef struct GCStats		GCStats;
typedef struct LFNode		LFNode;
typedef struct ParFor		ParFor;
typedef struct ParForThread	ParForThread;
typedef struct CgoMal		CgoMal;
typedef struct PollDesc	PollDesc;
typedef struct DebugVars	DebugVars;

typedef struct __go_open_array		Slice;
typedef struct __go_interface		Iface;
typedef struct __go_empty_interface	Eface;
typedef struct __go_type_descriptor	Type;
typedef struct __go_defer_stack	Defer;
typedef struct __go_panic_stack	Panic;

typedef struct __go_ptr_type		PtrType;
typedef struct __go_func_type		FuncType;
typedef struct __go_interface_type	InterfaceType;
typedef struct __go_map_type		MapType;
typedef struct __go_channel_type	ChanType;

typedef struct Traceback	Traceback;

typedef struct Location	Location;
/*
 * Per-CPU declaration.
 */
extern M*	runtime_m(void);
extern G*	runtime_g(void);
	// If you add to this list, add to the list
	// of "okay during garbage collection" status
	// in mgc0.c too.
	Gmoribund_unused,	// currently unused, but hardcoded in gdb scripts

	PtrSize = sizeof(void*),

	// Per-M stack segment cache size.

	// Global <-> per-M stack segment cache transfer batch size.
	StackCacheBatch = 16,
	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.

	// Futex-based impl treats it as uint32 key,
	// while sema-based impl as M* waitm.
	// Used to be a union, but unions break precise GC.

	// variable-size, fn-specific data here

	// the struct must consist of only uint64's,
	// because it is cast to uint64[].

// A location in the program, used for backtraces.
	void*	closure;	// Closure value.
	void*	exception;	// current exception being thrown
	bool	is_foreign;	// whether current exception is from another language
	void	*gcstack;	// if status==Gsyscall, gcstack = stackbase to use during gc
	uintptr	gcstack_size;
	void*	gcnext_segment;

	byte*	entry;		// initial function
	void*	param;		// passed parameter on wakeup
	bool	fromgogo;	// reached from gogo
	uint32	selgen;		// valid sudog pointer
	int64	waitsince;	// approx time when the G became blocked
	const char*	waitreason;	// if status==Gwaiting
	bool	issystem;	// do not output in stack dump
	bool	isbackground;	// ignore in deadlock detector
	bool	paniconfault;	// panic (instead of crash) on unexpected fault address
	M*	m;		// for debuggers, but offset not hard-coded
	uintptr	gopc;		// pc of go statement that created this goroutine
	Traceback*	traceback;

	void*	stack_context[10];
	G*	g0;		// goroutine with scheduling stack
	G*	gsignal;	// signal-handling G
	size_t	gsignalstacksize;
	void	(*mstartfn)(void);
	G*	curg;		// current running goroutine
	G*	caughtsig;	// goroutine running during fatal signal
	P*	p;		// attached P for executing Go code (nil if not executing Go code)

	bool	spinning;	// M is out of work and is actively looking for work
	bool	blocked;	// M is blocked on a Note
	uint64	ncgocall;	// number of cgo calls in total
	int32	ncgo;		// number of cgo calls currently in progress
	M*	alllink;	// on allm
	Location	createstack[32];	// Stack that created this thread.
	uint32	locked;		// tracking for LockOSThread
	M*	nextwaitm;	// next M waiting for lock
	uintptr	waitsema;	// semaphore for parking on locks
	uint32	waitsemacount;
	bool	dropextram;	// for gccgo: drop after call is done.
	bool	(*waitunlockf)(G*, void*);

	uint32	status;		// one of Pidle/Prunning/...
	uint32	schedtick;	// incremented on every scheduler call
	uint32	syscalltick;	// incremented on every system call
	M*	m;		// back-link to associated M (nil if idle)
	Defer*	deferpool;	// pool of available Defer structs (see panic.c)

	// Cache of goroutine ids, amortizes accesses to runtime_sched.goidgen.

	// Queue of runnable goroutines.

	// Available G's (status == Gdead)
// The m->locked word holds two pieces of state counting active calls to LockOSThread/lockOSThread.
// The low bit (LockExternal) is a boolean reporting whether any LockOSThread call is active.
// External locks are not recursive; a second lock is silently ignored.
// The upper bits of m->locked record the nesting depth of calls to lockOSThread
// (counting up by LockInternal), popped by unlockOSThread (counting down by LockInternal).
// Internal locks can be recursive. For instance, a lock for cgo can occur while the main
// goroutine is holding the lock during the initialization phase.
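/*
 * Illustrative encoding (a sketch, assuming the usual LockExternal = 1 and
 * LockInternal = 2 constants, which are elided from this extract):
 *
 *	m->locked = 0;                              // not locked to its thread
 *	m->locked = LockExternal;                   // one LockOSThread call active
 *	m->locked = LockExternal + 2*LockInternal;  // plus lockOSThread nested twice
 */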
	SigNotify	= 1<<0,	// let signal.Notify have signal, even if from kernel
	SigKill		= 1<<1,	// if signal.Notify doesn't take it, exit quietly
	SigThrow	= 1<<2,	// if signal.Notify doesn't take it, exit loudly
	SigPanic	= 1<<3,	// if the signal is from the kernel, panic
	SigDefault	= 1<<4,	// if the signal isn't explicitly requested, don't monitor it
	SigHandling	= 1<<5,	// our signal handler is registered
	SigIgnored	= 1<<6,	// the signal was ignored before we registered for it
	SigGoExit	= 1<<7,	// cause all runtime procs to exit (only used on Plan 9).
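/*
 * Illustrative sketch (hypothetical flags value; the real per-signal tables
 * live in the OS-specific signal files): a signal that signal.Notify may
 * observe and that otherwise terminates the program quietly would be
 * described by
 *
 *	int32 flags = SigNotify | SigKill;
 *
 * and the runtime marks its handler as installed by or-ing in SigHandling.
 */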
// Layout of in-memory per-function information prepared by linker
// See http://golang.org/s/go12symtab.
// Keep in sync with linker and with ../../libmach/sym.c
// and with package debug/gosym.

	uintptr	entry;	// entry pc

// Package time knows the layout of this structure.
// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
// For GOOS=nacl, package syscall knows the layout of this structure.
// If this struct changes, adjust ../syscall/net_nacl.go:/runtimeTimer.

	int32	i;	// heap index

	// Timer wakes up at when, and then at when+period, ... (period > 0 only)
	// each time calling f(now, arg) in the timer goroutine, so f must be
	// a well-behaved function and not block.

// Lock-free stack node.

// Parallel for descriptor.

	void	(*body)(ParFor*, uint32);	// executed for each element
	uint32	done;		// number of idle threads
	uint32	nthr;		// total number of threads
	uint32	nthrmax;	// maximum number of threads
	uint32	thrseq;		// thread id sequencer
	uint32	cnt;		// iteration space [0, cnt)
	void	*ctx;		// arbitrary user context
	bool	wait;		// if true, wait while all threads finish processing,
				// otherwise parfor may return while other threads are still working
	ParForThread	*thr;	// array of thread descriptors
	uint32	pad;		// to align ParForThread.pos for 64-bit atomic operations
// Track memory allocated by code not written in Go during a cgo call,
// so that the garbage collector can see it.

// Holds variables parsed from GODEBUG env var.

	int32	allocfreetrace;
extern bool runtime_precisestack;
extern bool runtime_copystack;
/*
 * defined macros
 *    you need super-gopher-guru privilege
 *    to add this list.
 */
#define nelem(x)	(sizeof(x)/sizeof((x)[0]))
#define nil		((void*)0)
#define USED(v)		((void) v)
#define ROUND(x, n)	(((x)+(n)-1)&~(uintptr)((n)-1)) /* all-caps to mark as macro: it evaluates n twice */
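/*
 * Illustrative examples (a sketch, not part of the original header): n must
 * be a power of two, and because ROUND evaluates n twice the argument should
 * be free of side effects.
 *
 *	int32 buf[10];
 *	nelem(buf)	// == 10
 *	ROUND(13, 8)	// == 16, rounded up to a multiple of 8
 *	ROUND(16, 8)	// == 16, already aligned
 */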
byte*	runtime_startup_random_data;
uint32	runtime_startup_random_data_len;
void	runtime_get_random_data(byte**, int32*);

	// hashinit wants this many random bytes

void	runtime_hashinit(void);

void	runtime_traceback(void);
void	runtime_tracebackothers(G*);

	// The maximum number of frames we print for a traceback
	TracebackMaxFrames = 100,
extern	uintptr	runtime_zerobase;
extern	G**	runtime_allg;
extern	uintptr	runtime_allglen;
extern	G*	runtime_lastg;
extern	M*	runtime_allm;
extern	P**	runtime_allp;
extern	int32	runtime_gomaxprocs;
extern	uint32	runtime_needextram;
extern	uint32	runtime_panicking;
extern	int8*	runtime_goos;
extern	int32	runtime_ncpu;
extern	void	(*runtime_sysargs)(int32, uint8**);
extern	uint32	runtime_Hchansize;
extern	DebugVars	runtime_debug;
extern	uintptr	runtime_maxstacksize;
/*
 * common functions and data
 */
#define runtime_strcmp(s1, s2) __builtin_strcmp((s1), (s2))
#define runtime_strncmp(s1, s2, n) __builtin_strncmp((s1), (s2), (n))
#define runtime_strstr(s1, s2) __builtin_strstr((s1), (s2))
intgo	runtime_findnull(const byte*);
intgo	runtime_findnullw(const uint16*);
void	runtime_dump(byte*, int32);
void	runtime_gogo(G*);
struct __go_func_type;
void	runtime_args(int32, byte**);
void	runtime_osinit();
void	runtime_goargs(void);
void	runtime_goenvs(void);
void	runtime_goenvs_unix(void);
void	runtime_throw(const char*) __attribute__ ((noreturn));
void	runtime_panicstring(const char*) __attribute__ ((noreturn));
bool	runtime_canpanic(G*);
void	runtime_prints(const char*);
void	runtime_printf(const char*, ...);
int32	runtime_snprintf(byte*, int32, const char*, ...);
#define runtime_mcmp(a, b, s) __builtin_memcmp((a), (b), (s))
#define runtime_memmove(a, b, s) __builtin_memmove((a), (b), (s))
void*	runtime_mal(uintptr);
String	runtime_gostring(const byte*);
String	runtime_gostringnocopy(const byte*);
void	runtime_schedinit(void);
void	runtime_initsig(void);
void	runtime_sigenable(uint32 sig);
void	runtime_sigdisable(uint32 sig);
int32	runtime_gotraceback(bool *crash);
void	runtime_goroutineheader(G*);
void	runtime_printtrace(Location*, int32, bool);
#define runtime_open(p, f, m) open((p), (f), (m))
#define runtime_read(d, v, n) read((d), (v), (n))
#define runtime_write(d, v, n) write((d), (v), (n))
#define runtime_close(d) close(d)
void	runtime_ready(G*);
const byte*	runtime_getenv(const char*);
int32	runtime_atoi(const byte*);
void*	runtime_mstart(void*);
G*	runtime_malg(int32, byte**, size_t*);
void	runtime_mpreinit(M*);
void	runtime_minit(void);
void	runtime_unminit(void);
void	runtime_needm(void);
void	runtime_dropm(void);
void	runtime_signalstack(byte*, int32);
MCache*	runtime_allocmcache(void);
void	runtime_freemcache(MCache*);
void	runtime_mallocinit(void);
void	runtime_mprofinit(void);
#define runtime_malloc(s) __go_alloc(s)
#define runtime_free(p) __go_free(p)
#define runtime_getcallersp(p) __builtin_frame_address(1)
int32	runtime_mcount(void);
int32	runtime_gcount(void);
void	runtime_mcall(void(*)(G*));
uint32	runtime_fastrand1(void);
int32	runtime_timediv(int64, int32, int32*);
int32	runtime_round2(int32 x); // round x up to a power of 2.
#define runtime_cas(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
#define runtime_cas64(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
#define runtime_casp(pval, old, new) __sync_bool_compare_and_swap (pval, old, new)
// Don't confuse with XADD x86 instruction,
// this one is actually 'addx', that is, add-and-fetch.
#define runtime_xadd(p, v) __sync_add_and_fetch (p, v)
#define runtime_xadd64(p, v) __sync_add_and_fetch (p, v)
#define runtime_xchg(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
#define runtime_xchg64(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
#define runtime_xchgp(p, v) __atomic_exchange_n (p, v, __ATOMIC_SEQ_CST)
#define runtime_atomicload(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
#define runtime_atomicstore(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
#define runtime_atomicstore64(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
#define runtime_atomicload64(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
#define runtime_atomicloadp(p) __atomic_load_n (p, __ATOMIC_SEQ_CST)
#define runtime_atomicstorep(p, v) __atomic_store_n (p, v, __ATOMIC_SEQ_CST)
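/*
 * Illustrative sketch (hypothetical counter, not part of the runtime): two
 * equivalent ways to bump a shared uint32 with the macros above; either
 * form of increment alone would do the job.
 *
 *	static uint32 counter;
 *
 *	static void
 *	incr(void)
 *	{
 *		uint32 old;
 *
 *		runtime_xadd(&counter, 1);	// add-and-fetch, returns the new value
 *
 *		do
 *			old = runtime_atomicload(&counter);
 *		while(!runtime_cas(&counter, old, old+1));	// CAS retry loop
 *	}
 */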
void	runtime_setmg(M*, G*);
void	runtime_newextram(void);
#define runtime_exit(s) exit(s)
#define runtime_breakpoint() __builtin_trap()
void	runtime_gosched(void);
void	runtime_gosched0(G*);
void	runtime_schedtrace(bool);
void	runtime_park(bool(*)(G*, void*), void*, const char*);
void	runtime_parkunlock(Lock*, const char*);
void	runtime_tsleep(int64, const char*);
M*	runtime_newm(void);
void	runtime_goexit(void);
void	runtime_entersyscall(void) __asm__ (GOSYM_PREFIX "syscall.Entersyscall");
void	runtime_entersyscallblock(void);
void	runtime_exitsyscall(void) __asm__ (GOSYM_PREFIX "syscall.Exitsyscall");
G*	__go_go(void (*pfn)(void*), void*);
bool	__go_sigsend(int32 sig);
int32	runtime_callers(int32, Location*, int32, bool keep_callers);
int64	runtime_nanotime(void);	// monotonic time
int64	runtime_unixnanotime(void); // real time, can skip
void	runtime_dopanic(int32) __attribute__ ((noreturn));
void	runtime_startpanic(void);
void	runtime_freezetheworld(void);
void	runtime_unwindstack(G*, byte*);
void	runtime_sigprof();
void	runtime_resetcpuprofiler(int32);
void	runtime_setcpuprofilerate(void(*)(uintptr*, int32), int32);
void	runtime_usleep(uint32);
int64	runtime_cputicks(void);
int64	runtime_tickspersecond(void);
void	runtime_blockevent(int64, int32);
extern int64 runtime_blockprofilerate;
void	runtime_addtimer(Timer*);
bool	runtime_deltimer(Timer*);
G*	runtime_netpoll(bool);
void	runtime_netpollinit(void);
int32	runtime_netpollopen(uintptr, PollDesc*);
int32	runtime_netpollclose(uintptr);
void	runtime_netpollready(G**, PollDesc*, int32);
uintptr	runtime_netpollfd(PollDesc*);
void	runtime_netpollarm(PollDesc*, int32);
void**	runtime_netpolluser(PollDesc*);
bool	runtime_netpollclosing(PollDesc*);
void	runtime_netpolllock(PollDesc*);
void	runtime_netpollunlock(PollDesc*);
void	runtime_crash(void);
void	runtime_parsedebugvars(void);
void*	runtime_funcdata(Func*, int32);
int32	runtime_setmaxthreads(int32);
G*	runtime_timejump(void);
void	runtime_iterate_finq(void (*callback)(FuncVal*, void*, const FuncType*, const PtrType*));

void	runtime_stoptheworld(void);
void	runtime_starttheworld(void);
extern uint32 runtime_worldsema;
/*
 * mutual exclusion locks.  in the uncontended case,
 * as fast as spin locks (just a few user-level instructions),
 * but on the contention path they sleep in the kernel.
 * a zeroed Lock is unlocked (no need to initialize each lock).
 */
void	runtime_lock(Lock*);
void	runtime_unlock(Lock*);
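/*
 * Illustrative sketch (hypothetical lock and counter, not part of the
 * runtime): because a zeroed Lock is already unlocked, no initialization is
 * needed before first use.
 *
 *	static Lock counterlock;	// zero value == unlocked
 *	static int64 counter;
 *
 *	static void
 *	bump(void)
 *	{
 *		runtime_lock(&counterlock);
 *		counter++;
 *		runtime_unlock(&counterlock);
 *	}
 */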
/*
 * sleep and wakeup on one-time events.
 * before any calls to notesleep or notewakeup,
 * must call noteclear to initialize the Note.
 * then, exactly one thread can call notesleep
 * and exactly one thread can call notewakeup (once).
 * once notewakeup has been called, the notesleep
 * will return.  future notesleep will return immediately.
 * subsequent noteclear must be called only after
 * previous notesleep has returned, e.g. it's disallowed
 * to call noteclear straight after notewakeup.
 *
 * notetsleep is like notesleep but wakes up after
 * a given number of nanoseconds even if the event
 * has not yet happened.  if a goroutine uses notetsleep to
 * wake up early, it must wait to call noteclear until it
 * can be sure that no other goroutine is calling
 * notewakeup.
 *
 * notesleep/notetsleep are generally called on g0,
 * notetsleepg is similar to notetsleep but is called on user g.
 */
void	runtime_noteclear(Note*);
void	runtime_notesleep(Note*);
void	runtime_notewakeup(Note*);
bool	runtime_notetsleep(Note*, int64);  // false - timeout
bool	runtime_notetsleepg(Note*, int64);  // false - timeout
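/*
 * Illustrative sketch (hypothetical Note, not part of the runtime): one
 * thread waits for a one-time event, another signals it exactly once.
 *
 *	static Note done;
 *
 *	// before either side runs:
 *	runtime_noteclear(&done);
 *
 *	// waiter (normally on g0):
 *	runtime_notesleep(&done);			// blocks until wakeup
 *	// or, with a 100ms timeout:
 *	if(!runtime_notetsleep(&done, 100*1000*1000))
 *		runtime_printf("timed out\n");
 *
 *	// signaler (exactly once per noteclear):
 *	runtime_notewakeup(&done);
 */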
/*
 * low-level synchronization for implementing the above
 */
uintptr	runtime_semacreate(void);
int32	runtime_semasleep(int64);
void	runtime_semawakeup(M*);

void	runtime_futexsleep(uint32*, uint32, int64);
void	runtime_futexwakeup(uint32*, uint32);
/*
 * Initialize uint64 head to 0, compare with 0 to test for emptiness.
 * The stack does not keep pointers to nodes,
 * so they can be garbage collected if there are no other pointers to nodes.
 */
void	runtime_lfstackpush(uint64 *head, LFNode *node)
	__asm__ (GOSYM_PREFIX "runtime.lfstackpush");
LFNode*	runtime_lfstackpop(uint64 *head);
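/*
 * Illustrative sketch (hypothetical node type, not part of the runtime):
 * LFNode is embedded as the first field so the popped LFNode* can be cast
 * back to the enclosing type, and an all-zero uint64 is an empty stack.
 *
 *	typedef struct MyNode MyNode;
 *	struct MyNode
 *	{
 *		LFNode	node;	// embedded first
 *		int32	value;
 *	};
 *
 *	static uint64 stackhead;	// 0 == empty
 *
 *	static void
 *	example(MyNode *n)
 *	{
 *		LFNode *top;
 *
 *		runtime_lfstackpush(&stackhead, &n->node);
 *		top = runtime_lfstackpop(&stackhead);	// nil when empty
 *	}
 */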
/*
 * Parallel for over [0, n).
 * body() is executed for each iteration.
 * nthr - total number of worker threads.
 * ctx - arbitrary user context.
 * if wait=true, threads return from parfor() when all work is done;
 * otherwise, threads can return while other threads are still finishing processing.
 */
ParFor*	runtime_parforalloc(uint32 nthrmax);
void	runtime_parforsetup(ParFor *desc, uint32 nthr, uint32 n, void *ctx, bool wait,
			void (*body)(ParFor*, uint32));
void	runtime_parfordo(ParFor *desc);
void	runtime_parforiters(ParFor*, uintptr, uintptr*, uintptr*);
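/*
 * Illustrative sketch (hypothetical body and sizes, not part of the runtime):
 * one thread sets up the descriptor, then each of the nthr workers calls
 * runtime_parfordo on it.
 *
 *	static void
 *	body(ParFor *desc, uint32 i)
 *	{
 *		// process iteration i; must not block
 *	}
 *
 *	static void
 *	example(void)
 *	{
 *		ParFor *desc;
 *
 *		desc = runtime_parforalloc(2);				// up to 2 workers
 *		runtime_parforsetup(desc, 2, 100, nil, true, body);	// iterate [0, 100)
 *		runtime_parfordo(desc);					// called by each worker
 *	}
 */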
#define runtime_mmap mmap
#define runtime_munmap munmap
#define runtime_madvise madvise
#define runtime_memclr(buf, size) __builtin_memset((buf), 0, (size))
#define runtime_getcallerpc(p) __builtin_return_address(0)

void __wrap_rtems_task_variable_add(void **);

/*
 * Names generated by gccgo.
 */
#define runtime_printbool	__go_print_bool
#define runtime_printfloat	__go_print_double
#define runtime_printint	__go_print_int64
#define runtime_printiface	__go_print_interface
#define runtime_printeface	__go_print_empty_interface
#define runtime_printstring	__go_print_string
#define runtime_printpointer	__go_print_pointer
#define runtime_printuint	__go_print_uint64
#define runtime_printslice	__go_print_slice
#define runtime_printcomplex	__go_print_complex
void	runtime_printbool(_Bool);
void	runtime_printbyte(int8);
void	runtime_printfloat(double);
void	runtime_printint(int64);
void	runtime_printiface(Iface);
void	runtime_printeface(Eface);
void	runtime_printstring(String);
void	runtime_printpc(void*);
void	runtime_printpointer(void*);
void	runtime_printuint(uint64);
void	runtime_printhex(uint64);
void	runtime_printslice(Slice);
void	runtime_printcomplex(complex double);
void	reflect_call(const struct __go_func_type *, FuncVal *, _Bool, _Bool,
		     void **, void **)
	__asm__ (GOSYM_PREFIX "reflect.call");
#define runtime_panic __go_panic
/*
 * runtime c-called (but written in Go)
 */
void	runtime_printany(Eface)
	__asm__ (GOSYM_PREFIX "runtime.Printany");
void	runtime_newTypeAssertionError(const String*, const String*, const String*, const String*, Eface*)
	__asm__ (GOSYM_PREFIX "runtime.NewTypeAssertionError");
void	runtime_newErrorString(String, Eface*)
	__asm__ (GOSYM_PREFIX "runtime.NewErrorString");
void	runtime_newErrorCString(const char*, Eface*)
	__asm__ (GOSYM_PREFIX "runtime.NewErrorCString");

/*
 * wrapped for go users
 */
void	runtime_semacquire(uint32 volatile *, bool);
void	runtime_semrelease(uint32 volatile *);
int32	runtime_gomaxprocsfunc(int32 n);
void	runtime_procyield(uint32);
void	runtime_osyield(void);
void	runtime_lockOSThread(void);
void	runtime_unlockOSThread(void);
bool	runtime_lockedOSThread(void);

bool	runtime_showframe(String, bool);
void	runtime_printcreatedby(G*);

uintptr	runtime_memlimit(void);
#define ISNAN(f) __builtin_isnan(f)

#define runtime_setitimer setitimer

void	runtime_check(void);

// A list of global variables that the garbage collector must scan.
	struct root_list *next;

void	__go_register_gc_roots(struct root_list*);

// Size of stack space allocated using Go's allocator.
// This will be 0 when using split stacks, as in that case
// the stacks are allocated by the splitstack library.
extern uintptr runtime_stacks_sys;
struct backtrace_state;
extern struct backtrace_state *__go_get_backtrace_state(void);
extern _Bool __go_file_line(uintptr, String*, String*, intgo*);
extern byte* runtime_progname();
extern void runtime_main(void*);
extern uint32 runtime_in_callers;

int32	getproccount(void);

#define PREFETCH(p) __builtin_prefetch(p)

void	__go_set_closure(void*);
void*	__go_get_closure(void);

bool	runtime_gcwaiting(void);
void	runtime_badsignal(int);
Defer*	runtime_newdefer(void);
void	runtime_freedefer(Defer*);

struct time_now_ret now() __asm__ (GOSYM_PREFIX "time.now")
	__attribute__ ((no_split_stack));