[official-gcc.git] / libgo / runtime / runtime.c

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include <signal.h>
#include <unistd.h>

#include "config.h"

#include "runtime.h"
#include "arch.h"
#include "array.h"

enum {
	maxround = sizeof(uintptr),
};

// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bit
// is the "crash" setting, the next bit is the "all
// goroutines" setting, and the remaining bits are
// the gotraceback level.
enum {
	tracebackCrash = 1 << 0,
	tracebackAll = 1 << 1,
	tracebackShift = 2,
};

static uint32 traceback_cache = 2 << tracebackShift;
static uint32 traceback_env;
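
/* Editor's sketch, not part of the original file: tiny helpers showing how
   the packed traceback_cache word decodes under the bit layout described
   above.  The helper names are hypothetical. */
static inline uint32 traceback_level(uint32 cache) { return cache >> tracebackShift; }
static inline bool traceback_all(uint32 cache) { return (cache & tracebackAll) != 0; }
static inline bool traceback_crash(uint32 cache) { return (cache & tracebackCrash) != 0; }
/* For example, GOTRACEBACK=system is cached as (2<<tracebackShift)|tracebackAll,
   i.e. 0xa: level 2, all goroutines, no crash. */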

extern volatile intgo runtime_MemProfileRate
  __asm__ (GOSYM_PREFIX "runtime.MemProfileRate");

// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after printing the traceback.
int32
runtime_gotraceback(bool *crash)
{
	uint32 x;

	if(crash != nil)
		*crash = false;
	if(runtime_m()->traceback != 0)
		return runtime_m()->traceback;
	x = runtime_atomicload(&traceback_cache);
	if(crash != nil)
		*crash = x&tracebackCrash;
	return x>>tracebackShift;
}
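
/* Editor's sketch, not part of the original file: a hypothetical caller,
   illustrating how the returned level and crash flag are meant to be used
   when reporting a fatal error.

	bool crash;
	int32 level;

	level = runtime_gotraceback(&crash);
	if(level > 0) {
		// dump goroutine stacks; level > 1 would also show runtime frames
	}
	if(crash) {
		// force a core dump after the traceback
	}
*/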

static int32 argc;
static byte** argv;

static Slice args;
Slice envs;

void (*runtime_sysargs)(int32, uint8**);

void
runtime_args(int32 c, byte **v)
{
	argc = c;
	argv = v;
	if(runtime_sysargs != nil)
		runtime_sysargs(c, v);
}

byte*
runtime_progname()
{
	return argc == 0 ? nil : argv[0];
}

void
runtime_goargs(void)
{
	String *s;
	int32 i;

	// for windows implementation see "os" package
	if(Windows)
		return;

	s = runtime_malloc(argc*sizeof s[0]);
	for(i=0; i<argc; i++)
		s[i] = runtime_gostringnocopy((const byte*)argv[i]);
	args.__values = (void*)s;
	args.__count = argc;
	args.__capacity = argc;
}

void
runtime_goenvs_unix(void)
{
	String *s;
	int32 i, n;

	// The environment strings follow argv and its nil terminator in the
	// argument block; count them first.
	for(n=0; argv[argc+1+n] != 0; n++)
		;

	s = runtime_malloc(n*sizeof s[0]);
	for(i=0; i<n; i++)
		s[i] = runtime_gostringnocopy(argv[argc+1+i]);
	envs.__values = (void*)s;
	envs.__count = n;
	envs.__capacity = n;
}

// Called from the syscall package.
Slice runtime_envs(void) __asm__ (GOSYM_PREFIX "syscall.runtime_envs");

Slice
runtime_envs()
{
	return envs;
}

Slice os_runtime_args(void) __asm__ (GOSYM_PREFIX "os.runtime_args");

Slice
os_runtime_args()
{
	return args;
}

int32
runtime_atoi(const byte *p, intgo len)
{
	int32 n;

	n = 0;
	while(len > 0 && '0' <= *p && *p <= '9') {
		n = n*10 + *p++ - '0';
		len--;
	}
	return n;
}

static struct root_list runtime_roots =
{ nil,
  { { &envs, sizeof envs },
    { &args, sizeof args },
    { nil, 0 } },
};

static void
TestAtomic64(void)
{
	uint64 z64, x64;

	z64 = 42;
	x64 = 0;
	PREFETCH(&z64);
	if(runtime_cas64(&z64, x64, 1))
		runtime_throw("cas64 failed");
	if(x64 != 0)
		runtime_throw("cas64 failed");
	x64 = 42;
	if(!runtime_cas64(&z64, x64, 1))
		runtime_throw("cas64 failed");
	if(x64 != 42 || z64 != 1)
		runtime_throw("cas64 failed");
	if(runtime_atomicload64(&z64) != 1)
		runtime_throw("load64 failed");
	runtime_atomicstore64(&z64, (1ull<<40)+1);
	if(runtime_atomicload64(&z64) != (1ull<<40)+1)
		runtime_throw("store64 failed");
	if(runtime_xadd64(&z64, (1ull<<40)+1) != (2ull<<40)+2)
		runtime_throw("xadd64 failed");
	if(runtime_atomicload64(&z64) != (2ull<<40)+2)
		runtime_throw("xadd64 failed");
	if(runtime_xchg64(&z64, (3ull<<40)+3) != (2ull<<40)+2)
		runtime_throw("xchg64 failed");
	if(runtime_atomicload64(&z64) != (3ull<<40)+3)
		runtime_throw("xchg64 failed");
}

void
runtime_check(void)
{
	__go_register_gc_roots(&runtime_roots);

	TestAtomic64();
}

// Return a pseudo-random uint32, using a cheap per-M shift-and-xor generator.
uint32
runtime_fastrand1(void)
{
	M *m;
	uint32 x;

	m = runtime_m();
	x = m->fastrand;
	x += x;
	if(x & 0x80000000L)
		x ^= 0x88888eefUL;
	m->fastrand = x;
	return x;
}

int64
runtime_cputicks(void)
{
#if defined(__386__) || defined(__x86_64__)
	uint32 low, high;
	asm("rdtsc" : "=a" (low), "=d" (high));
	return (int64)(((uint64)high << 32) | (uint64)low);
#elif defined (__s390__) || defined (__s390x__)
	uint64 clock = 0;
	/* stckf may not write the return variable in case of a clock error, so make
	   it read-write to prevent the initialisation from being optimised out.
	   Note: Targets below z9-109 will crash when executing store clock fast, i.e.
	   we don't support Go for machines older than that. */
	asm volatile(".insn s,0xb27c0000,%0" /* stckf */ : "+Q" (clock) : : "cc" );
	return (int64)clock;
#else
	// FIXME: implement for other processors.
	return 0;
#endif
}

bool
runtime_showframe(String s, bool current)
{
	static int32 traceback = -1;

	if(current && runtime_m()->throwing > 0)
		return 1;
	if(traceback < 0)
		traceback = runtime_gotraceback(nil);
	return traceback > 1 || (__builtin_memchr(s.str, '.', s.len) != nil && __builtin_memcmp(s.str, "runtime.", 7) != 0);
}

static Lock ticksLock;
static int64 ticks;

// Estimate the frequency of runtime_cputicks by sampling it against
// nanotime over roughly 100ms; the result is computed once and cached.
int64
runtime_tickspersecond(void)
{
	int64 res, t0, t1, c0, c1;

	res = (int64)runtime_atomicload64((uint64*)&ticks);
	if(res != 0)
		return ticks;
	runtime_lock(&ticksLock);
	res = ticks;
	if(res == 0) {
		t0 = runtime_nanotime();
		c0 = runtime_cputicks();
		runtime_usleep(100*1000);
		t1 = runtime_nanotime();
		c1 = runtime_cputicks();
		if(t1 == t0)
			t1++;
		res = (c1-c0)*1000*1000*1000/(t1-t0);
		if(res == 0)
			res++;
		runtime_atomicstore64((uint64*)&ticks, res);
	}
	runtime_unlock(&ticksLock);
	return res;
}
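
/* Editor's sketch, not part of the original file: a hypothetical helper
   showing how the calibrated rate converts a cputicks() delta into
   nanoseconds (assumes the delta is small enough not to overflow int64). */
static inline int64
example_ticks_to_ns(int64 dt)
{
	// dt ticks correspond to dt/tps seconds, i.e. dt*1e9/tps nanoseconds.
	return dt * (1000*1000*1000) / runtime_tickspersecond();
}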

// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.
void
runtime_mpreinit(M *mp)
{
	int32 stacksize = 32 * 1024;	// OS X wants >=8K, Linux >=2K

#ifdef SIGSTKSZ
	if(stacksize < SIGSTKSZ)
		stacksize = SIGSTKSZ;
#endif

	mp->gsignal = runtime_malg(stacksize, (byte**)&mp->gsignalstack, &mp->gsignalstacksize);
	mp->gsignal->m = mp;
}

// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, can not allocate memory.
void
runtime_minit(void)
{
	M* m;
	sigset_t sigs;

	// Initialize signal handling.
	m = runtime_m();
	runtime_signalstack(m->gsignalstack, m->gsignalstacksize);
	if (sigemptyset(&sigs) != 0)
		runtime_throw("sigemptyset");
	pthread_sigmask(SIG_SETMASK, &sigs, nil);
}

// Called from dropm to undo the effect of an minit.
void
runtime_unminit(void)
{
	runtime_signalstack(nil, 0);
}

void
runtime_signalstack(byte *p, int32 n)
{
	stack_t st;

	st.ss_sp = p;
	st.ss_size = n;
	st.ss_flags = 0;
	if(p == nil)
		st.ss_flags = SS_DISABLE;
	if(sigaltstack(&st, nil) < 0)
		*(int *)0xf1 = 0xf1;	// crash deliberately rather than run without a signal stack
}

void setTraceback(String level)
  __asm__ (GOSYM_PREFIX "runtime_debug.SetTraceback");

void setTraceback(String level) {
	uint32 t;

	if (level.len == 4 && __builtin_memcmp(level.str, "none", 4) == 0) {
		t = 0;
	} else if (level.len == 0 || (level.len == 6 && __builtin_memcmp(level.str, "single", 6) == 0)) {
		t = 1 << tracebackShift;
	} else if (level.len == 3 && __builtin_memcmp(level.str, "all", 3) == 0) {
		t = (1<<tracebackShift) | tracebackAll;
	} else if (level.len == 6 && __builtin_memcmp(level.str, "system", 6) == 0) {
		t = (2<<tracebackShift) | tracebackAll;
	} else if (level.len == 5 && __builtin_memcmp(level.str, "crash", 5) == 0) {
		t = (2<<tracebackShift) | tracebackAll | tracebackCrash;
	} else {
		t = (runtime_atoi(level.str, level.len)<<tracebackShift) | tracebackAll;
	}

	t |= traceback_env;

	runtime_atomicstore(&traceback_cache, t);
}
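
/* Editor's worked example, not part of the original file: the cache words
   produced above for each recognized level string, given the enum values
   tracebackCrash == 1, tracebackAll == 2, tracebackShift == 2:

	"none"          -> 0x0
	"" or "single"  -> 0x4   (level 1)
	"all"           -> 0x6   (level 1, all goroutines)
	"system"        -> 0xa   (level 2, all goroutines)
	"crash"         -> 0xb   (level 2, all goroutines, crash)
	numeric "N"     -> (N<<2)|2

   before traceback_env is ORed back in. */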

DebugVars runtime_debug;

// Holds variables parsed from the GODEBUG env var,
// except for "memprofilerate", since there is an
// existing variable for that value which is an int
// rather than an int32 and might have an
// initial value.
static struct {
	const char* name;
	int32* value;
} dbgvar[] = {
	{"allocfreetrace", &runtime_debug.allocfreetrace},
	{"cgocheck", &runtime_debug.cgocheck},
	{"efence", &runtime_debug.efence},
	{"gccheckmark", &runtime_debug.gccheckmark},
	{"gcpacertrace", &runtime_debug.gcpacertrace},
	{"gcshrinkstackoff", &runtime_debug.gcshrinkstackoff},
	{"gcstackbarrieroff", &runtime_debug.gcstackbarrieroff},
	{"gcstackbarrierall", &runtime_debug.gcstackbarrierall},
	{"gcstoptheworld", &runtime_debug.gcstoptheworld},
	{"gctrace", &runtime_debug.gctrace},
	{"gcdead", &runtime_debug.gcdead},
	{"invalidptr", &runtime_debug.invalidptr},
	{"sbrk", &runtime_debug.sbrk},
	{"scavenge", &runtime_debug.scavenge},
	{"scheddetail", &runtime_debug.scheddetail},
	{"schedtrace", &runtime_debug.schedtrace},
	{"wbshadow", &runtime_debug.wbshadow},
};

void
runtime_parsedebugvars(void)
{
	String s;
	const byte *p, *pn;
	intgo len;
	intgo i, n;

	s = runtime_getenv("GODEBUG");
	if(s.len == 0)
		return;
	p = s.str;
	len = s.len;
	for(;;) {
		for(i=0; i<(intgo)nelem(dbgvar); i++) {
			n = runtime_findnull((const byte*)dbgvar[i].name);
			if(len > n && runtime_mcmp(p, "memprofilerate", n) == 0 && p[n] == '=')
				// Set the MemProfileRate directly since it
				// is an int, not an int32, and should only be
				// set here if specified by GODEBUG.
				runtime_MemProfileRate = runtime_atoi(p+n+1, len-(n+1));
			else if(len > n && runtime_mcmp(p, dbgvar[i].name, n) == 0 && p[n] == '=')
				*dbgvar[i].value = runtime_atoi(p+n+1, len-(n+1));
		}

		pn = (const byte *)runtime_strstr((const char *)p, ",");
		if(pn == nil || pn - p >= len)
			break;
		len -= (pn - p) - 1;
		p = pn + 1;
	}

	setTraceback(runtime_getenv("GOTRACEBACK"));
	traceback_env = traceback_cache;
}
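
/* Editor's note, not part of the original file: the loop above walks a
   comma-separated list of name=value pairs, so an environment such as

	GODEBUG=gctrace=1,schedtrace=1000,scheddetail=1

   sets runtime_debug.gctrace, runtime_debug.schedtrace and
   runtime_debug.scheddetail, while

	GODEBUG=memprofilerate=0

   writes runtime_MemProfileRate directly instead of a dbgvar slot. */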

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
void SetTracebackEnv(String level)
  __asm__ (GOSYM_PREFIX "runtime.SetTracebackEnv");

void SetTracebackEnv(String level) {
	setTraceback(level);
	traceback_env = traceback_cache;
}
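
/* Editor's note, hypothetical sequence, not part of the original file:
   because setTraceback ORs traceback_env back into every new setting, a
   level raised here cannot be lowered later.  For example:

	SetTracebackEnv("crash" as a String)   // traceback_env gains the crash bit
	setTraceback("none" as a String)       // t starts at 0, then t |= traceback_env

   so traceback_cache still carries the crash bit and level 2 afterwards. */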

// Poor man's 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into a _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.
int32
runtime_timediv(int64 v, int32 div, int32 *rem)
{
	int32 res, bit;

	if(v >= (int64)div*0x7fffffffLL) {
		if(rem != nil)
			*rem = 0;
		return 0x7fffffff;
	}
	res = 0;
	for(bit = 30; bit >= 0; bit--) {
		if(v >= ((int64)div<<bit)) {
			v = v - ((int64)div<<bit);
			res += 1<<bit;
		}
	}
	if(rem != nil)
		*rem = v;
	return res;
}
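
/* Editor's worked example, not part of the original file: the loop above is
   plain shift-and-subtract division, so

	runtime_timediv(3500000000LL, 1000000000, &rem)

   returns 3 with rem == 500000000, and any quotient that would not fit in
   31 bits saturates to 0x7fffffff with rem == 0. */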

// Setting the max stack size doesn't really do anything for gccgo.

uintptr runtime_maxstacksize = 1<<20; // enough until runtime.main sets it for real

void memclrBytes(Slice)
     __asm__ (GOSYM_PREFIX "runtime.memclrBytes");

void
memclrBytes(Slice s)
{
	runtime_memclr(s.__values, s.__count);
}