libgo/go/runtime/time.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Time-related runtime and pieces of package time.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)

// Package time knows the layout of this structure.
// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
// For GOOS=nacl, package syscall knows the layout of this structure.
// If this struct changes, adjust ../syscall/net_nacl.go:/runtimeTimer.
type timer struct {
	tb *timersBucket // the bucket the timer lives in
	i  int           // heap index

	// Timer wakes up at when, and then at when+period, ... (period > 0 only)
	// each time calling f(arg, now) in the timer goroutine, so f must be
	// a well-behaved function and not block.
	when   int64
	period int64
	f      func(interface{}, uintptr)
	arg    interface{}
	seq    uintptr
}
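
// For a rough illustration (values assumed, not taken from the source): a
// time.Sleep timer is a one-shot with period == 0 whose f simply readies the
// sleeping goroutine, while a ticker-style timer might look like
//
//	t.when = nanotime() + 100*1000*1000 // first fire about 100ms from now
//	t.period = 100 * 1000 * 1000        // then re-armed every 100ms
//
// so timerproc keeps it in the heap and only advances when after each fire.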

// timersLen is the length of timers array.
//
// Ideally, this would be set to GOMAXPROCS, but that would require
// dynamic reallocation
//
// The current value is a compromise between memory usage and performance
// that should cover the majority of GOMAXPROCS values used in the wild.
const timersLen = 64

// timers contains "per-P" timer heaps.
//
// Timers are queued into timersBucket associated with the current P,
// so each P may work with its own timers independently of other P instances.
//
// Each timersBucket may be associated with multiple P
// if GOMAXPROCS > timersLen.
var timers [timersLen]struct {
	timersBucket

	// The padding should eliminate false sharing
	// between timersBucket values.
	pad [sys.CacheLineSize - unsafe.Sizeof(timersBucket{})%sys.CacheLineSize]byte
}
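
// The pad arithmetic is easiest to see with assumed numbers (not the real
// sizes): if sys.CacheLineSize were 64 and a timersBucket occupied 120 bytes,
// pad would be 64 - 120%64 = 8 bytes, rounding each array element up to 128
// bytes so that adjacent buckets do not share a cache line (assuming the
// array itself is suitably aligned).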

func (t *timer) assignBucket() *timersBucket {
	id := uint8(getg().m.p.ptr().id) % timersLen
	t.tb = &timers[id].timersBucket
	return t.tb
}
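
// With timersLen = 64, assignBucket maps a P to bucket id%64, so for
// GOMAXPROCS <= 64 each P gets a private bucket; at, say, GOMAXPROCS = 70
// (an assumed value), P 65 would share bucket 1 with P 1.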

//go:notinheap
type timersBucket struct {
	lock         mutex
	gp           *g
	created      bool
	sleeping     bool
	rescheduling bool
	sleepUntil   int64
	waitnote     note
	t            []*timer
}
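
// The bucket's bookkeeping fields describe how its timerproc is blocked:
// sleeping/waitnote mean it is in notetsleepg waiting for the next deadline,
// rescheduling/gp mean it is parked with an empty heap and must be readied
// with goready, and created records that the timerproc goroutine has been
// started lazily by the first addtimerLocked on this bucket.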

// nacl fake time support - time in nanoseconds since 1970
var faketime int64

// Package time APIs.
// Godoc uses the comments in package time, not these.

// time.now is implemented in assembly.

// timeSleep puts the current goroutine to sleep for at least ns nanoseconds.
//go:linkname timeSleep time.Sleep
func timeSleep(ns int64) {
	if ns <= 0 {
		return
	}

	gp := getg()
	t := gp.timer
	if t == nil {
		t = new(timer)
		gp.timer = t
	}
	*t = timer{}
	t.when = nanotime() + ns
	t.f = goroutineReady
	t.arg = gp
	tb := t.assignBucket()
	lock(&tb.lock)
	tb.addtimerLocked(t)
	goparkunlock(&tb.lock, "sleep", traceEvGoSleep, 2)
}
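
// timeSleep parks the goroutine while still holding tb.lock; goparkunlock
// releases the lock once the park is committed. When the timer fires,
// timerproc calls goroutineReady(gp, seq), which readies the goroutine again.
// Callers reach this path via the go:linkname above, e.g. (ordinary user
// code, shown purely for illustration):
//
//	time.Sleep(50 * time.Millisecond)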

// startTimer adds t to the timer heap.
//go:linkname startTimer time.startTimer
func startTimer(t *timer) {
	if raceenabled {
		racerelease(unsafe.Pointer(t))
	}
	addtimer(t)
}

// stopTimer removes t from the timer heap if it is there.
// It returns true if t was removed, false if t wasn't even there.
//go:linkname stopTimer time.stopTimer
func stopTimer(t *timer) bool {
	return deltimer(t)
}

// Go runtime.

// Ready the goroutine arg.
func goroutineReady(arg interface{}, seq uintptr) {
	goready(arg.(*g), 0)
}

func addtimer(t *timer) {
	tb := t.assignBucket()
	lock(&tb.lock)
	tb.addtimerLocked(t)
	unlock(&tb.lock)
}

// Add a timer to the heap and start or kick timerproc if the new timer is
// earlier than any of the others.
// Timers are locked.
func (tb *timersBucket) addtimerLocked(t *timer) {
	// when must never be negative; otherwise timerproc will overflow
	// during its delta calculation and never expire other runtime timers.
	if t.when < 0 {
		t.when = 1<<63 - 1
	}
	t.i = len(tb.t)
	tb.t = append(tb.t, t)
	siftupTimer(tb.t, t.i)
	if t.i == 0 {
		// siftup moved to top: new earliest deadline.
		if tb.sleeping {
			tb.sleeping = false
			notewakeup(&tb.waitnote)
		}
		if tb.rescheduling {
			tb.rescheduling = false
			goready(tb.gp, 0)
		}
	}
	if !tb.created {
		tb.created = true
		expectSystemGoroutine()
		go timerproc(tb)
	}
}
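
// Adding to the heap only matters to timerproc when the new timer becomes
// the root (t.i == 0 after siftup), i.e. the new earliest deadline: a
// sleeping timerproc is woken early through notewakeup, and a parked one is
// made runnable with goready. The first timer ever added to a bucket also
// starts that bucket's timerproc goroutine.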

// Delete timer t from the heap.
// Do not need to update the timerproc: if it wakes up early, no big deal.
func deltimer(t *timer) bool {
	if t.tb == nil {
		// t.tb can be nil if the user created a timer
		// directly, without invoking startTimer, e.g.
		//	time.Ticker{C: c}
		// In this case, return early without any deletion.
		// See Issue 21874.
		return false
	}

	tb := t.tb

	lock(&tb.lock)
	// t may not be registered anymore and may have
	// a bogus i (typically 0, if generated by Go).
	// Verify it before proceeding.
	i := t.i
	last := len(tb.t) - 1
	if i < 0 || i > last || tb.t[i] != t {
		unlock(&tb.lock)
		return false
	}
	if i != last {
		tb.t[i] = tb.t[last]
		tb.t[i].i = i
	}
	tb.t[last] = nil
	tb.t = tb.t[:last]
	if i != last {
		siftupTimer(tb.t, i)
		siftdownTimer(tb.t, i)
	}
	unlock(&tb.lock)
	return true
}
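
// Deletion swaps the last heap element into slot i, shrinks the slice, and
// then runs both siftupTimer and siftdownTimer from i, because the moved
// timer may need to travel in either direction to restore the heap order.
// The sleeping timerproc is deliberately not woken; waking up with nothing
// to do is harmless.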

// Timerproc runs the time-driven events.
// It sleeps until the next event in the tb heap.
// If addtimer inserts a new earlier event, it wakes timerproc early.
func timerproc(tb *timersBucket) {
	setSystemGoroutine()

	tb.gp = getg()
	for {
		lock(&tb.lock)
		tb.sleeping = false
		now := nanotime()
		delta := int64(-1)
		for {
			if len(tb.t) == 0 {
				delta = -1
				break
			}
			t := tb.t[0]
			delta = t.when - now
			if delta > 0 {
				break
			}
			if t.period > 0 {
				// leave in heap but adjust next time to fire
				t.when += t.period * (1 + -delta/t.period)
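				// Illustrative arithmetic for the adjustment above (values
				// assumed, not from the source): with period = 50ms and a
				// timer 120ms overdue (delta = -120ms), -delta/period = 2 by
				// integer division, so when advances by 3*period = 150ms and
				// the next fire lands 30ms in the future; the missed
				// intermediate ticks are skipped rather than delivered one
				// by one.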
				siftdownTimer(tb.t, 0)
			} else {
				// remove from heap
				last := len(tb.t) - 1
				if last > 0 {
					tb.t[0] = tb.t[last]
					tb.t[0].i = 0
				}
				tb.t[last] = nil
				tb.t = tb.t[:last]
				if last > 0 {
					siftdownTimer(tb.t, 0)
				}
				t.i = -1 // mark as removed
			}
			f := t.f
			arg := t.arg
			seq := t.seq
			unlock(&tb.lock)
			if raceenabled {
				raceacquire(unsafe.Pointer(t))
			}
			f(arg, seq)
			lock(&tb.lock)
		}
		if delta < 0 || faketime > 0 {
			// No timers left - put goroutine to sleep.
			tb.rescheduling = true
			goparkunlock(&tb.lock, "timer goroutine (idle)", traceEvGoBlock, 1)
			continue
		}
		// At least one timer pending. Sleep until then.
		tb.sleeping = true
		tb.sleepUntil = now + delta
		noteclear(&tb.waitnote)
		unlock(&tb.lock)
		notetsleepg(&tb.waitnote, delta)
	}
}
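
// Each pass of the outer loop drains every expired timer at the top of the
// heap, calling f(arg, seq) with tb.lock released, and then blocks: it parks
// itself when the heap is empty (or under nacl faketime), to be readied by
// addtimerLocked via goready, or it sleeps on waitnote until the earliest
// deadline, from which notewakeup can rouse it early.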

func timejump() *g {
	if faketime == 0 {
		return nil
	}

	for i := range timers {
		lock(&timers[i].lock)
	}
	gp := timejumpLocked()
	for i := range timers {
		unlock(&timers[i].lock)
	}

	return gp
}

func timejumpLocked() *g {
	// Determine a timer bucket with minimum when.
	var minT *timer
	for i := range timers {
		tb := &timers[i]
		if !tb.created || len(tb.t) == 0 {
			continue
		}
		t := tb.t[0]
		if minT == nil || t.when < minT.when {
			minT = t
		}
	}
	if minT == nil || minT.when <= faketime {
		return nil
	}

	faketime = minT.when
	tb := minT.tb
	if !tb.rescheduling {
		return nil
	}
	tb.rescheduling = false
	return tb.gp
}
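
// Under nacl faketime, timejump advances the fake clock directly to the
// earliest pending timer's deadline and hands back the parked timerproc
// goroutine for that bucket so the scheduler can run it.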

func timeSleepUntil() int64 {
	next := int64(1<<63 - 1)

	// Determine minimum sleepUntil across all the timer buckets.
	//
	// The function can not return a precise answer,
	// as another timer may pop in as soon as timers have been unlocked.
	// So lock the timers one by one instead of all at once.
	for i := range timers {
		tb := &timers[i]

		lock(&tb.lock)
		if tb.sleeping && tb.sleepUntil < next {
			next = tb.sleepUntil
		}
		unlock(&tb.lock)
	}

	return next
}

// Heap maintenance algorithms.

func siftupTimer(t []*timer, i int) {
	when := t[i].when
	tmp := t[i]
	for i > 0 {
		p := (i - 1) / 4 // parent
		if when >= t[p].when {
			break
		}
		t[i] = t[p]
		t[i].i = i
		i = p
	}
	if tmp != t[i] {
		t[i] = tmp
		t[i].i = i
	}
}
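
// The timers in a bucket form a 4-ary min-heap keyed on when: the parent of
// slot i is (i-1)/4 and its children are slots 4*i+1 through 4*i+4 (slot 7's
// parent is slot 1 and its children are slots 29-32, for example).
// siftupTimer shifts parents down along that path and writes the saved
// element tmp back only once, keeping each moved timer's heap index field i
// in sync.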

func siftdownTimer(t []*timer, i int) {
	n := len(t)
	when := t[i].when
	tmp := t[i]
	for {
		c := i*4 + 1 // left child
		c3 := c + 2  // mid child
		if c >= n {
			break
		}
		w := t[c].when
		if c+1 < n && t[c+1].when < w {
			w = t[c+1].when
			c++
		}
		if c3 < n {
			w3 := t[c3].when
			if c3+1 < n && t[c3+1].when < w3 {
				w3 = t[c3+1].when
				c3++
			}
			if w3 < w {
				w = w3
				c = c3
			}
		}
		if w >= when {
			break
		}
		t[i] = t[c]
		t[i].i = i
		i = c
	}
	if tmp != t[i] {
		t[i] = tmp
		t[i].i = i
	}
}
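
// siftdownTimer finds the smallest of up to four children with three
// comparisons: the first if picks the smaller of the pair starting at c,
// the nested if picks the smaller of the pair starting at c3 = c+2, and the
// final comparison chooses between the two winners before the element sinks
// one level.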

// Entry points for net, time to call nanotime.

//go:linkname poll_runtimeNano internal_poll.runtimeNano
func poll_runtimeNano() int64 {
	return nanotime()
}

//go:linkname time_runtimeNano time.runtimeNano
func time_runtimeNano() int64 {
	return nanotime()
}

// Monotonic times are reported as offsets from startNano.
// We initialize startNano to nanotime() - 1 so that on systems where
// monotonic time resolution is fairly low (e.g. Windows 2008
// which appears to have a default resolution of 15ms),
// we avoid ever reporting a nanotime of 0.
// (Callers may want to use 0 as "time not set".)
var startNano int64 = nanotime() - 1