// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
	maxround = sizeof(uintptr),
// Keep a cached value to make gotraceback fast,
// since we call it on every call to gentraceback.
// The cached value is a uint32 in which the low bits
// are the "crash" and "all" settings and the remaining
// bits are the traceback level.

	tracebackCrash = 1 << 0,
	tracebackAll = 1 << 1,

static uint32 traceback_cache = 2 << tracebackShift;
static uint32 traceback_env;
extern volatile intgo runtime_MemProfileRate
  __asm__ (GOSYM_PREFIX "runtime.MemProfileRate");
// gotraceback returns the current traceback settings.
//
// If level is 0, suppress all tracebacks.
// If level is 1, show tracebacks, but exclude runtime frames.
// If level is 2, show tracebacks including runtime frames.
// If all is set, print all goroutine stacks. Otherwise, print just the current goroutine.
// If crash is set, crash (core dump, etc) after tracebacking.
runtime_gotraceback(bool *crash)

	if(runtime_m()->traceback != 0)
		return runtime_m()->traceback;
	x = runtime_atomicload(&traceback_cache);

	*crash = x&tracebackCrash;
	return x>>tracebackShift;
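
/* Illustrative sketch (not from the original source): how a caller might
   decode the packed value the same way runtime_gotraceback does, using the
   masks and shift defined above.  The function name and the sample value 0xb
   (what GOTRACEBACK=crash produces) are assumptions for the example only. */
#if 0
static void
gotraceback_decode_example(void)
{
	uint32 x;
	bool crash;
	int32 level;

	x = 0xb;                            /* packed cache value for GOTRACEBACK=crash */
	crash = (x & tracebackCrash) != 0;  /* true: core dump after the traceback */
	level = x >> tracebackShift;        /* 2: include runtime frames */
	(void)crash;
	(void)level;
}
#endif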
void (*runtime_sysargs)(int32, uint8**);
runtime_args(int32 c, byte **v)

	if(runtime_sysargs != nil)
		runtime_sysargs(c, v);
	return argc == 0 ? nil : argv[0];
	// for windows implementation see "os" package

	s = runtime_malloc(argc*sizeof s[0]);
		s[i] = runtime_gostringnocopy((const byte*)argv[i]);
	args.__values = (void*)s;

	args.__capacity = argc;
runtime_goenvs_unix(void)

	for(n=0; argv[argc+1+n] != 0; n++)

	s = runtime_malloc(n*sizeof s[0]);
		s[i] = runtime_gostringnocopy(argv[argc+1+i]);
	envs.__values = (void*)s;
// Called from the syscall package.
Slice runtime_envs(void) __asm__ (GOSYM_PREFIX "syscall.runtime_envs");

Slice os_runtime_args(void) __asm__ (GOSYM_PREFIX "os.runtime_args");
runtime_atoi(const byte *p, intgo len)

	while(len > 0 && '0' <= *p && *p <= '9') {
		n = n*10 + *p++ - '0';
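
/* Hypothetical usage of runtime_atoi, shown only to illustrate the loop
   above: parsing stops at the first non-digit byte, so trailing characters
   are simply ignored. */
#if 0
static void
atoi_example(void)
{
	int32 n;

	n = runtime_atoi((const byte*)"123", 3);  /* 123 */
	(void)n;
	n = runtime_atoi((const byte*)"42x", 3);  /* 42: stops at 'x' */
	(void)n;
}
#endif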
static struct root_list runtime_roots =

  { { &envs, sizeof envs },
    { &args, sizeof args },
	if(runtime_cas64(&z64, x64, 1))
		runtime_throw("cas64 failed");

		runtime_throw("cas64 failed");

	if(!runtime_cas64(&z64, x64, 1))
		runtime_throw("cas64 failed");
	if(x64 != 42 || z64 != 1)
		runtime_throw("cas64 failed");
	if(runtime_atomicload64(&z64) != 1)
		runtime_throw("load64 failed");
	runtime_atomicstore64(&z64, (1ull<<40)+1);
	if(runtime_atomicload64(&z64) != (1ull<<40)+1)
		runtime_throw("store64 failed");
	if(runtime_xadd64(&z64, (1ull<<40)+1) != (2ull<<40)+2)
		runtime_throw("xadd64 failed");
	if(runtime_atomicload64(&z64) != (2ull<<40)+2)
		runtime_throw("xadd64 failed");
	if(runtime_xchg64(&z64, (3ull<<40)+3) != (2ull<<40)+2)
		runtime_throw("xchg64 failed");
	if(runtime_atomicload64(&z64) != (3ull<<40)+3)
		runtime_throw("xchg64 failed");
	__go_register_gc_roots(&runtime_roots);
runtime_fastrand1(void)
runtime_cputicks(void)

#if defined(__386__) || defined(__x86_64__)
	asm("rdtsc" : "=a" (low), "=d" (high));
	return (int64)(((uint64)high << 32) | (uint64)low);
#elif defined (__s390__) || defined (__s390x__)
	/* stckf may not write the return variable in case of a clock error, so make
	   it read-write to prevent the initialisation from being optimised out.
	   Note: Targets below z9-109 will crash when executing store clock fast, i.e.
	   we don't support Go for machines older than that.  */
220 asm volatile(".insn s,0xb27c0000,%0" /* stckf */ : "+Q" (clock
) : : "cc" );
223 // FIXME: implement for other processors.
runtime_showframe(String s, bool current)

	static int32 traceback = -1;

	if(current && runtime_m()->throwing > 0)

		traceback = runtime_gotraceback(nil);
	return traceback > 1 || (__builtin_memchr(s.str, '.', s.len) != nil && __builtin_memcmp(s.str, "runtime.", 7) != 0);
static Lock ticksLock;
runtime_tickspersecond(void)

	int64 res, t0, t1, c0, c1;

	res = (int64)runtime_atomicload64((uint64*)&ticks);

	runtime_lock(&ticksLock);

		t0 = runtime_nanotime();
		c0 = runtime_cputicks();
		runtime_usleep(100*1000);
		t1 = runtime_nanotime();
		c1 = runtime_cputicks();

		res = (c1-c0)*1000*1000*1000/(t1-t0);

		runtime_atomicstore64((uint64*)&ticks, res);

	runtime_unlock(&ticksLock);
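
/* Worked example of the calibration above, with made-up numbers: if the
   100ms sleep spans t1-t0 = 100,000,000 ns and the CPU tick counter advanced
   by c1-c0 = 250,000,000, the estimate is 250e6 * 1e9 / 100e6 =
   2,500,000,000 ticks per second, i.e. a 2.5 GHz clock. */
#if 0
static int64
tickspersecond_example(void)
{
	int64 t0, t1, c0, c1;

	t0 = 0;
	t1 = 100*1000*1000;    /* elapsed wall time: 100 ms in nanoseconds */
	c0 = 0;
	c1 = 250*1000*1000;    /* cputicks observed over that window */
	return (c1-c0)*1000*1000*1000/(t1-t0);   /* 2500000000 */
}
#endif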
// Called to initialize a new m (including the bootstrap m).
// Called on the parent thread (main thread in case of bootstrap), can allocate memory.

runtime_mpreinit(M *mp)

	mp->gsignal = runtime_malg(32*1024, &mp->gsignalstack, &mp->gsignalstacksize);	// OS X wants >=8K, Linux >=2K
// Called to initialize a new m (including the bootstrap m).
// Called on the new thread, can not allocate memory.

	// Initialize signal handling.

	runtime_signalstack(m->gsignalstack, m->gsignalstacksize);
	if (sigemptyset(&sigs) != 0)
		runtime_throw("sigemptyset");
	pthread_sigmask(SIG_SETMASK, &sigs, nil);
// Called from dropm to undo the effect of an minit.

runtime_unminit(void)

	runtime_signalstack(nil, 0);
runtime_signalstack(byte *p, int32 n)

		st.ss_flags = SS_DISABLE;
	if(sigaltstack(&st, nil) < 0)
void setTraceback(String level)
  __asm__ (GOSYM_PREFIX "runtime_debug.SetTraceback");

void setTraceback(String level) {
	if (level.len == 4 && __builtin_memcmp(level.str, "none", 4) == 0) {

	} else if (level.len == 0 || (level.len == 6 && __builtin_memcmp(level.str, "single", 6) == 0)) {
		t = 1 << tracebackShift;
	} else if (level.len == 3 && __builtin_memcmp(level.str, "all", 3) == 0) {
		t = (1<<tracebackShift) | tracebackAll;
	} else if (level.len == 6 && __builtin_memcmp(level.str, "system", 6) == 0) {
		t = (2<<tracebackShift) | tracebackAll;
	} else if (level.len == 5 && __builtin_memcmp(level.str, "crash", 5) == 0) {
		t = (2<<tracebackShift) | tracebackAll | tracebackCrash;

		t = (runtime_atoi(level.str, level.len)<<tracebackShift) | tracebackAll;

	runtime_atomicstore(&traceback_cache, t);
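
/* Worked example of the encoding above, computed directly from the branches
   and shown only for illustration: each GOTRACEBACK setting packs the level
   above tracebackShift with the "all" and "crash" flags in the low bits. */
#if 0
static void
settraceback_encoding_example(void)
{
	uint32 t_none, t_single, t_all, t_system, t_crash;

	t_none   = 0;                                                      /* 0x0 */
	t_single = 1 << tracebackShift;                                    /* 0x4 */
	t_all    = (1 << tracebackShift) | tracebackAll;                   /* 0x6 */
	t_system = (2 << tracebackShift) | tracebackAll;                   /* 0xa */
	t_crash  = (2 << tracebackShift) | tracebackAll | tracebackCrash;  /* 0xb */
	(void)t_none; (void)t_single; (void)t_all; (void)t_system; (void)t_crash;
}
#endif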
DebugVars	runtime_debug;

// Holds variables parsed from GODEBUG env var,
// except for "memprofilerate" since there is an
// existing var for that value which is int
// instead of int32 and might have an
// initial value.
352 {"allocfreetrace", &runtime_debug
.allocfreetrace
},
353 {"cgocheck", &runtime_debug
.cgocheck
},
354 {"efence", &runtime_debug
.efence
},
355 {"gccheckmark", &runtime_debug
.gccheckmark
},
356 {"gcpacertrace", &runtime_debug
.gcpacertrace
},
357 {"gcshrinkstackoff", &runtime_debug
.gcshrinkstackoff
},
358 {"gcstackbarrieroff", &runtime_debug
.gcstackbarrieroff
},
359 {"gcstackbarrierall", &runtime_debug
.gcstackbarrierall
},
360 {"gcstoptheworld", &runtime_debug
.gcstoptheworld
},
361 {"gctrace", &runtime_debug
.gctrace
},
362 {"gcdead", &runtime_debug
.gcdead
},
363 {"invalidptr", &runtime_debug
.invalidptr
},
364 {"sbrk", &runtime_debug
.sbrk
},
365 {"scavenge", &runtime_debug
.scavenge
},
366 {"scheddetail", &runtime_debug
.scheddetail
},
367 {"schedtrace", &runtime_debug
.schedtrace
},
368 {"wbshadow", &runtime_debug
.wbshadow
},
runtime_parsedebugvars(void)

	s = runtime_getenv("GODEBUG");
	for(i=0; i<(intgo)nelem(dbgvar); i++) {
		n = runtime_findnull((const byte*)dbgvar[i].name);
		if(len > n && runtime_mcmp(p, "memprofilerate", n) == 0 && p[n] == '=')
			// Set the MemProfileRate directly since it
			// is an int, not int32, and should only be
			// set here if specified by GODEBUG
			runtime_MemProfileRate = runtime_atoi(p+n+1, len-(n+1));
		else if(len > n && runtime_mcmp(p, dbgvar[i].name, n) == 0 && p[n] == '=')
			*dbgvar[i].value = runtime_atoi(p+n+1, len-(n+1));
	pn = (const byte *)runtime_strstr((const char *)p, ",");
	if(pn == nil || pn - p >= len)
	setTraceback(runtime_getenv("GOTRACEBACK"));
	traceback_env = traceback_cache;
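
/* Standalone sketch (standard C, not the runtime's own helpers) of the
   GODEBUG scanning technique used above: walk comma-separated "name=value"
   pairs, compare the name, and parse the decimal value.  A setting such as
   GODEBUG=gctrace=1,schedtrace=1000 would yield 1 for "gctrace" and 1000 for
   "schedtrace"; "memprofilerate" is special-cased above because its target
   variable is an intgo rather than an int32. */
#if 0
#include <string.h>
#include <stdlib.h>

static long
godebug_lookup(const char *godebug, const char *name)
{
	const char *p = godebug;
	size_t n = strlen(name);

	while (*p != '\0') {
		if (strncmp(p, name, n) == 0 && p[n] == '=')
			return strtol(p + n + 1, NULL, 10);
		p = strchr(p, ',');
		if (p == NULL)
			break;
		p++;             /* skip the comma, try the next pair */
	}
	return -1;               /* not present */
}
#endif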
// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
void SetTracebackEnv(String level)
  __asm__ (GOSYM_PREFIX "runtime.SetTracebackEnv");

void SetTracebackEnv(String level) {

	traceback_env = traceback_cache;
// Poor man's 64-bit division.
// This is a very special function, do not use it if you are not sure what you are doing.
// int64 division is lowered into _divv() call on 386, which does not fit into nosplit functions.
// Handles overflow in a time-specific manner.

runtime_timediv(int64 v, int32 div, int32 *rem)

	if(v >= (int64)div*0x7fffffffLL) {
	for(bit = 30; bit >= 0; bit--) {
		if(v >= ((int64)div<<bit)) {
			v = v - ((int64)div<<bit);
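
/* Worked example of the shift-and-subtract division above (values chosen for
   illustration, and assuming the elided quotient/remainder bookkeeping of the
   full function): dividing 1,500,000,000 by 1,000,000,000 subtracts div<<0
   exactly once, so the quotient is 1 and the remainder 500,000,000. */
#if 0
static void
timediv_example(void)
{
	int32 q, rem;

	q = runtime_timediv(1500000000LL, 1000000000, &rem);
	/* q == 1, rem == 500000000 */
	(void)q;
	(void)rem;
}
#endif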
// Setting the max stack size doesn't really do anything for gccgo.

uintptr runtime_maxstacksize = 1<<20; // enough until runtime.main sets it for real
void memclrBytes(Slice)
     __asm__ (GOSYM_PREFIX "runtime.memclrBytes");

	runtime_memclr(s.__values, s.__count);