[official-gcc.git] / libgo / runtime / runtime_c.c
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include <errno.h>
#include <signal.h>
#include <unistd.h>

#if defined(__i386__) || defined(__x86_64__)
#include <cpuid.h>
#endif

#ifdef __linux__
#include <syscall.h>
#endif

#include "config.h"

#include "runtime.h"
#include "arch.h"
#include "array.h"

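// runtime_atoi parses a non-negative decimal integer from at most len bytes
// of p, stopping at the first non-digit.  Overflow is not checked.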
int32
runtime_atoi(const byte *p, intgo len)
{
	int32 n;

	n = 0;
	while(len > 0 && '0' <= *p && *p <= '9') {
		n = n*10 + *p++ - '0';
		len--;
	}
	return n;
}

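// runtime_fastrand returns a pseudo-random uint32.  The state lives in the
// current M, so no locking is needed.  The update (double the state and, when
// the result's high bit is set, xor in the constant 0x88888eef) is a cheap
// linear-feedback-style step; it relies on m->fastrand having been seeded to
// a nonzero value elsewhere in the runtime, since zero is a fixed point.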
uint32
runtime_fastrand(void)
{
	M *m;
	uint32 x;

	m = runtime_m();
	x = m->fastrand;
	x += x;
	if(x & 0x80000000L)
		x ^= 0x88888eefUL;
	m->fastrand = x;
	return x;
}

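// runtime_cputicks returns a cheap CPU timestamp: the TSC on x86, the TOD
// clock on s390, and a nanotime fallback elsewhere (see the comments below).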
int64
runtime_cputicks(void)
{
#if defined(__i386__) || defined(__x86_64__)
	uint32 low, high;
	asm("rdtsc" : "=a" (low), "=d" (high));
	return (int64)(((uint64)high << 32) | (uint64)low);
#elif defined (__s390__) || defined (__s390x__)
	uint64 clock = 0;
	/* stckf may not write the return variable in case of a clock error, so
	   make it read-write to ensure the initialisation is not optimised out.
	   Note: targets below z9-109 will crash when executing store clock fast,
	   i.e. we don't support Go for machines older than that. */
	asm volatile(".insn s,0xb27c0000,%0" /* stckf */ : "+Q" (clock) : : "cc" );
	return (int64)clock;
#else
	// Currently cputicks() is used in blocking profiler and to seed runtime·fastrand().
	// runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler.
	// TODO: need more entropy to better seed fastrand.
	return runtime_nanotime();
#endif
}

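// runtime_signalstack installs p (n bytes) as the alternate signal stack, or
// disables the alternate stack when p is nil.  If sigaltstack fails, the
// store to the unmapped address 0xf1 crashes the process on purpose, so the
// faulting address identifies this spot in a core dump.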
void
runtime_signalstack(byte *p, uintptr n)
{
	stack_t st;

	st.ss_sp = p;
	st.ss_size = n;
	st.ss_flags = 0;
	if(p == nil)
		st.ss_flags = SS_DISABLE;
	if(sigaltstack(&st, nil) < 0)
		*(int *)0xf1 = 0xf1;
}

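// The wrappers below are exported to Go code.  The __asm__ attribute on each
// declaration gives the C function the assembler-level name that the Go
// compiler uses for the corresponding bodyless declaration in package
// runtime (GOSYM_PREFIX supplies any target-specific symbol prefix), so a
// Go-side declaration along the lines of
//
//	func open(name *byte, mode, perm int32) int32
//
// resolves to go_open at link time.  (The Go declaration shown here is an
// illustrative sketch, not copied from this tree.)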
int32 go_open(char *, int32, int32)
  __asm__ (GOSYM_PREFIX "runtime.open");

int32
go_open(char *name, int32 mode, int32 perm)
{
  return runtime_open(name, mode, perm);
}

int32 go_read(int32, void *, int32)
  __asm__ (GOSYM_PREFIX "runtime.read");

int32
go_read(int32 fd, void *p, int32 n)
{
  return runtime_read(fd, p, n);
}

int32 go_write(uintptr, void *, int32)
  __asm__ (GOSYM_PREFIX "runtime.write");

int32
go_write(uintptr fd, void *p, int32 n)
{
  return runtime_write(fd, p, n);
}

int32 go_closefd(int32)
  __asm__ (GOSYM_PREFIX "runtime.closefd");

int32
go_closefd(int32 fd)
{
  return runtime_close(fd);
}

intgo go_errno(void)
  __asm__ (GOSYM_PREFIX "runtime.errno");

intgo
go_errno()
{
  return (intgo)errno;
}

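// getEnd is exported to Go as runtime.getEnd and returns an address just past
// the program's statically allocated data (or the fixed 32-bit mmap base on
// AIX).  __go_end is taken by address and compared against nil, which
// suggests it is a weak symbol supplied by the link; the fallback is 0 when
// it is absent.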
uintptr getEnd(void)
  __asm__ (GOSYM_PREFIX "runtime.getEnd");

uintptr
getEnd()
{
#ifdef _AIX
  // The mmap address range starts at 0x30000000 on AIX for 32-bit processes.
  uintptr end = 0x30000000U;
#else
  uintptr end = 0;
  uintptr *pend;

  pend = &__go_end;
  if (pend != nil) {
    end = *pend;
  }
#endif

  return end;
}

// CPU-specific initialization.
// Fetch CPUID info on x86.
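// CPUID leaf 1 reports feature bits in ECX and EDX; only ECX is forwarded to
// the Go side via setCpuidECX.  setSupportAES reflects HAVE_AS_X86_AES, a
// configure-time check that the assembler accepts AES instructions.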
void
runtime_cpuinit()
{
#if defined(__i386__) || defined(__x86_64__)
	unsigned int eax, ebx, ecx, edx;

	if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
		setCpuidECX(ecx);
	}

#if defined(HAVE_AS_X86_AES)
	setSupportAES(true);
#endif
#endif
}

// A publication barrier: a store/store barrier.

void publicationBarrier(void)
  __asm__ (GOSYM_PREFIX "runtime.publicationBarrier");

void
publicationBarrier()
{
  __atomic_thread_fence(__ATOMIC_RELEASE);
}

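// Illustrative use of the barrier above (a sketch, not code from this file):
// a writer that initializes an object and then publishes a pointer to it
// needs the initializing stores to become visible before the publishing
// store:
//
//	obj->field = value;     // initialize
//	publicationBarrier();   // order the stores
//	shared = obj;           // publish: readers that observe 'shared'
//	                        //          also observe 'field'
//
// The release fence orders the earlier stores before the later publishing
// store on the writer's side.
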
#ifdef __linux__

/* Currently sbrk0 is only called on GNU/Linux.  */
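/* Asking SYS_brk for address 0 does not move the program break; the raw
   system call returns the current break, unlike the brk() library wrapper,
   which would return only 0 or -1.  */
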
uintptr sbrk0(void)
  __asm__ (GOSYM_PREFIX "runtime.sbrk0");

uintptr
sbrk0()
{
  return syscall(SYS_brk, (uintptr)(0));
}

#endif /* __linux__ */