// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

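package time_test

import (
	"errors"
	"fmt"
	"internal/testenv"
	"math/rand"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	. "time"
)
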
// Go runtime uses different Windows timers for time.Now and sleeping.
// These can tick at different frequencies and can arrive out of sync.
// The effect can be seen, for example, as time.Sleep(100ms) is actually
// shorter than 100ms when measured as the difference between time.Now before
// and after the time.Sleep call. This was observed on Windows XP SP3 (windows/386).
// windowsInaccuracy is to ignore such errors.
const windowsInaccuracy = 17 * Millisecond

func TestSleep(t *testing.T) {
	const delay = 100 * Millisecond
	go func() {
		// Interrupt the sleep from another goroutine; Sleep must still last the full delay.
		Sleep(delay / 2)
		Interrupt()
	}()
	start := Now()
	Sleep(delay)
	delayadj := delay
	if runtime.GOOS == "windows" {
		delayadj -= windowsInaccuracy
	}
	duration := Now().Sub(start)
	if duration < delayadj {
		t.Fatalf("Sleep(%s) slept for only %s", delay, duration)
	}
}

// Test the basic function calling behavior. Correct queueing
// behavior is tested elsewhere, since After and AfterFunc share
// the same code.
func TestAfterFunc(t *testing.T) {
	i := 10
	c := make(chan bool)
	var f func()
	f = func() {
		i--
		if i >= 0 {
			AfterFunc(0, f)
			Sleep(1 * Second)
		} else {
			c <- true
		}
	}

	AfterFunc(0, f)
	<-c
}

func TestAfterStress(t *testing.T) {
	stop := uint32(0)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
			// Yield so that the OS can wake up the timer thread,
			// so that it can generate channel sends for the main goroutine,
			// which will eventually set stop = 1 for us.
			Sleep(Nanosecond)
		}
	}()
	ticker := NewTicker(1)
	for i := 0; i < 100; i++ {
		<-ticker.C
	}
	ticker.Stop()
	atomic.StoreUint32(&stop, 1)
}

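// benchmark populates every P's timer heap with a large batch of inert
// long-duration timers, runs bench under RunParallel against those loaded
// heaps, and stops the garbage timers once the measurement is done.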
func benchmark(b *testing.B, bench func(n int)) {
	// Create equal number of garbage timers on each P before starting
	// the benchmark.
	var wg sync.WaitGroup
	garbageAll := make([][]*Timer, runtime.GOMAXPROCS(0))
	for i := range garbageAll {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			garbage := make([]*Timer, 1<<15)
			for j := range garbage {
				garbage[j] = AfterFunc(Hour, nil)
			}
			garbageAll[i] = garbage
		}(i)
	}
	wg.Wait()

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			bench(1000)
		}
	})
	b.StopTimer()

	for _, garbage := range garbageAll {
		for _, t := range garbage {
			t.Stop()
		}
	}
}

func BenchmarkAfterFunc(b *testing.B) {
	benchmark(b, func(n int) {
		c := make(chan bool)
		var f func()
		f = func() {
			n--
			if n >= 0 {
				AfterFunc(0, f)
			} else {
				c <- true
			}
		}

		AfterFunc(0, f)
		<-c
	})
}

func BenchmarkAfter(b *testing.B) {
	benchmark(b, func(n int) {
		for i := 0; i < n; i++ {
			<-After(1)
		}
	})
}

func BenchmarkStop(b *testing.B) {
	benchmark(b, func(n int) {
		for i := 0; i < n; i++ {
			NewTimer(1 * Second).Stop()
		}
	})
}

func BenchmarkSimultaneousAfterFunc(b *testing.B) {
	benchmark(b, func(n int) {
		var wg sync.WaitGroup
		wg.Add(n)
		for i := 0; i < n; i++ {
			AfterFunc(0, wg.Done)
		}
		wg.Wait()
	})
}

func BenchmarkStartStop(b *testing.B) {
	benchmark(b, func(n int) {
		timers := make([]*Timer, n)
		for i := 0; i < n; i++ {
			timers[i] = AfterFunc(Hour, nil)
		}

		for i := 0; i < n; i++ {
			timers[i].Stop()
		}
	})
}

func BenchmarkReset(b *testing.B) {
	benchmark(b, func(n int) {
		t := NewTimer(Hour)
		for i := 0; i < n; i++ {
			t.Reset(Hour)
		}
		t.Stop()
	})
}

func BenchmarkSleep(b *testing.B) {
	benchmark(b, func(n int) {
		var wg sync.WaitGroup
		wg.Add(n)
		for i := 0; i < n; i++ {
			go func() {
				Sleep(Nanosecond)
				wg.Done()
			}()
		}
		wg.Wait()
	})
}

func TestAfter(t *testing.T) {
	const delay = 100 * Millisecond
	start := Now()
	end := <-After(delay)
	delayadj := delay
	if runtime.GOOS == "windows" {
		delayadj -= windowsInaccuracy
	}
	if duration := Now().Sub(start); duration < delayadj {
		t.Fatalf("After(%s) slept for only %d ns", delay, duration)
	}
	if min := start.Add(delayadj); end.Before(min) {
		t.Fatalf("After(%s) expect >= %s, got %s", delay, min, end)
	}
}

func TestAfterTick(t *testing.T) {
	const Count = 10
	Delta := 100 * Millisecond
	if testing.Short() {
		Delta = 10 * Millisecond
	}
	t0 := Now()
	for i := 0; i < Count; i++ {
		<-After(Delta)
	}
	t1 := Now()
	d := t1.Sub(t0)
	target := Delta * Count
	if d < target*9/10 {
		t.Fatalf("%d ticks of %s too fast: took %s, expected %s", Count, Delta, d, target)
	}
	if !testing.Short() && d > target*30/10 {
		t.Fatalf("%d ticks of %s too slow: took %s, expected %s", Count, Delta, d, target)
	}
}

func TestAfterStop(t *testing.T) {
	// We want to test that we stop a timer before it runs.
	// We also want to test that it didn't run after a longer timer.
	// Since we don't want the test to run for too long, we don't
	// want to use lengthy times. That makes the test inherently flaky.
	// So only report an error if it fails five times in a row.

	var errs []string
	logErrs := func() {
		for _, e := range errs {
			t.Log(e)
		}
	}

	for i := 0; i < 5; i++ {
		AfterFunc(100*Millisecond, func() {})
		t0 := NewTimer(50 * Millisecond)
		c1 := make(chan bool, 1)
		t1 := AfterFunc(150*Millisecond, func() { c1 <- true })
		c2 := After(200 * Millisecond)
		if !t0.Stop() {
			errs = append(errs, "failed to stop event 0")
			continue
		}
		if !t1.Stop() {
			errs = append(errs, "failed to stop event 1")
			continue
		}
		<-c2
		select {
		case <-t0.C:
			errs = append(errs, "event 0 was not stopped")
			continue
		case <-c1:
			errs = append(errs, "event 1 was not stopped")
			continue
		default:
		}
		if t1.Stop() {
			errs = append(errs, "Stop returned true twice")
			continue
		}

		// Test passed, so all done.
		if len(errs) > 0 {
			t.Logf("saw %d errors, ignoring to avoid flakiness", len(errs))
			logErrs()
		}
		return
	}

	if len(errs) > 0 {
		t.Errorf("saw %d errors", len(errs))
		logErrs()
	}
}

func TestAfterQueuing(t *testing.T) {
	// This test flakes out on some systems,
	// so we'll try it a few times before declaring it a failure.
	const attempts = 5
	err := errors.New("!=nil")
	for i := 0; i < attempts && err != nil; i++ {
		delta := Duration(20+i*50) * Millisecond
		if err = testAfterQueuing(delta); err != nil {
			t.Logf("attempt %v failed: %v", i, err)
		}
	}
	if err != nil {
		t.Fatal(err)
	}
}

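// slots lists the multiples of delta at which testAfterQueuing schedules
// After calls; the results must come back in non-decreasing slot order and
// near their target times.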
// For gccgo omit 0 for now because it can take too long to start the
var slots = []int{5, 3, 6, 6, 6, 1, 1, 2, 7, 9, 4, 8 /*0*/}

type afterResult struct {
	slot int
	t    Time
}

func await(slot int, result chan<- afterResult, ac <-chan Time) {
	result <- afterResult{slot, <-ac}
}

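// testAfterQueuing schedules an After call for each entry in slots, then
// checks that the results arrive in slot order and within a tolerance of the
// requested delay.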
func testAfterQueuing(delta Duration) error {
	// make the result channel buffered because we don't want
	// to depend on channel queueing semantics that might
	// possibly change in the future.
	result := make(chan afterResult, len(slots))

	t0 := Now()
	for _, slot := range slots {
		go await(slot, result, After(Duration(slot)*delta))
	}
	var order []int
	var times []Time
	for range slots {
		r := <-result
		order = append(order, r.slot)
		times = append(times, r.t)
	}
	for i := range order {
		if i > 0 && order[i] < order[i-1] {
			return fmt.Errorf("After calls returned out of order: %v", order)
		}
	}
	for i, t := range times {
		dt := t.Sub(t0)
		target := Duration(order[i]) * delta
		if dt < target-delta/2 || dt > target+delta*10 {
			return fmt.Errorf("After(%s) arrived at %s, expected [%s,%s]", target, dt, target-delta/2, target+delta*10)
		}
	}
	return nil
}

func TestTimerStopStress(t *testing.T) {
	if testing.Short() {
		return
	}
	for i := 0; i < 100; i++ {
		go func(i int) {
			timer := AfterFunc(2*Second, func() {
				t.Errorf("timer %d was not stopped", i)
			})
			Sleep(1 * Second)
			timer.Stop()
		}(i)
	}
	Sleep(3 * Second)
}

func TestSleepZeroDeadlock(t *testing.T) {
	// Sleep(0) used to hang; the sequence of events was as follows.
	// Sleep(0) sets G's status to Gwaiting, but then immediately returns, leaving the status.
	// Then the goroutine calls e.g. new and falls down into the scheduler due to pending GC.
	// After the GC nobody wakes up the goroutine from Gwaiting status.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	c := make(chan bool)
	go func() {
		for i := 0; i < 100; i++ {
			runtime.GC()
		}
		c <- true
	}()
	for i := 0; i < 100; i++ {
		Sleep(0)
		tmp := make(chan bool, 1)
		tmp <- true
		<-tmp
	}
	<-c
}

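// testReset checks Timer.Reset on an unfired and then on an expired timer,
// using delays that are multiples of d. It returns an error rather than
// failing directly so that TestReset can retry with larger durations.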
func testReset(d Duration) error {
	t0 := NewTimer(2 * d)
	Sleep(d)
	if !t0.Reset(3 * d) {
		return errors.New("resetting unfired timer returned false")
	}
	Sleep(2 * d)
	select {
	case <-t0.C:
		return errors.New("timer fired early")
	default:
	}
	Sleep(2 * d)
	select {
	case <-t0.C:
	default:
		return errors.New("reset timer did not fire")
	}

	if t0.Reset(50 * Millisecond) {
		return errors.New("resetting expired timer returned true")
	}
	return nil
}

func TestReset(t *testing.T) {
	// We try to run this test with increasingly larger multiples
	// until one works, so that slow, loaded hardware isn't as flaky,
	// but without slowing down fast machines unnecessarily.
	const unit = 25 * Millisecond
	tries := []Duration{
		1 * unit,
		3 * unit,
		7 * unit,
		15 * unit,
	}
	var err error
	for _, d := range tries {
		err = testReset(d)
		if err == nil {
			t.Logf("passed using duration %v", d)
			return
		}
	}
	t.Error(err)
}

// Test that sleeping (via Sleep or Timer) for an interval so large it
// overflows does not result in a short sleep duration. Nor does it interfere
// with execution of other timers. If it does, timers in this or subsequent
// tests may not fire.
func TestOverflowSleep(t *testing.T) {
	const big = Duration(int64(1<<63 - 1))

	go func() {
		Sleep(big)
		// On failure, this may return after the test has completed, so
		// we need to panic instead.
		panic("big sleep returned")
	}()

	select {
	case <-After(big):
		t.Fatalf("big timeout fired")
	case <-After(25 * Millisecond):
		// OK
	}

	const neg = Duration(-1 << 63)
	Sleep(neg) // Returns immediately.
	select {
	case <-After(neg):
		// OK
	case <-After(1 * Second):
		t.Fatalf("negative timeout didn't fire")
	}
}

// Test that a panic while deleting a timer does not leave
// the timers mutex held, deadlocking a ticker.Stop in a defer.
func TestIssue5745(t *testing.T) {
	ticker := NewTicker(Hour)
	defer func() {
		// would deadlock here before the fix due to
		// the lock taken before the segfault.
		ticker.Stop()

		if r := recover(); r == nil {
			t.Error("Expected panic, but none happened.")
		}
	}()

	// cause a panic due to a segfault
	var timer *Timer
	timer.Stop()
	t.Error("Should be unreachable.")
}

func TestOverflowPeriodRuntimeTimer(t *testing.T) {
	// This may hang forever if timers are broken. See comment near
	// the end of CheckRuntimeTimerOverflow in internal_test.go.
	CheckRuntimeTimerPeriodOverflow()
}

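// checkZeroPanicString is deferred by the zero-Timer tests below. It recovers
// the expected panic and verifies that the message mentions the uninitialized
// Timer.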
func checkZeroPanicString(t *testing.T) {
	e := recover()
	s, _ := e.(string)
	if want := "called on uninitialized Timer"; !strings.Contains(s, want) {
		t.Errorf("panic = %v; want substring %q", e, want)
	}
}

func TestZeroTimerResetPanics(t *testing.T) {
	defer checkZeroPanicString(t)
	var tr Timer
	tr.Reset(1)
}

func TestZeroTimerStopPanics(t *testing.T) {
	defer checkZeroPanicString(t)
	var tr Timer
	tr.Stop()
}

// Test that zero duration timers aren't missed by the scheduler. Regression test for issue 44868.
func TestZeroTimer(t *testing.T) {
	if testing.Short() {
		t.Skip("-short")
	}

	for i := 0; i < 1000000; i++ {
		s := Now()
		ti := NewTimer(0)
		<-ti.C
		if diff := Since(s); diff > 2*Second {
			t.Errorf("Expected time to get value from Timer channel in less than 2 sec, took %v", diff)
		}
	}
}

// Test that rapidly moving a timer earlier doesn't cause it to get dropped.
func TestTimerModifiedEarlier(t *testing.T) {
	if runtime.GOOS == "plan9" && runtime.GOARCH == "arm" {
		testenv.SkipFlaky(t, 50470)
	}

	past := Until(Unix(0, 0))
	count := 1000
	fail := 0
	for i := 0; i < count; i++ {
		timer := NewTimer(Hour)
		for j := 0; j < 10; j++ {
			if !timer.Stop() {
				<-timer.C
			}
			timer.Reset(past)
		}

		deadline := NewTimer(10 * Second)
		defer deadline.Stop()
		now := Now()
		select {
		case <-timer.C:
			if since := Since(now); since > 8*Second {
				t.Errorf("timer took too long (%v)", since)
				fail++
			}
		case <-deadline.C:
			t.Error("deadline expired")
		}
	}

	if fail > 0 {
		t.Errorf("%d failures", fail)
	}
}

// Test that rapidly moving timers earlier and later doesn't cause
// some of the sleep times to be lost.
func TestAdjustTimers(t *testing.T) {
	var rnd = rand.New(rand.NewSource(Now().UnixNano()))

	timers := make([]*Timer, 100)
	states := make([]int, len(timers))
	indices := rnd.Perm(len(timers))

	for len(indices) != 0 {
		var ii = rnd.Intn(len(indices))
		var i = indices[ii]

		var timer = timers[i]
		var state = states[i]
		states[i]++

		switch state {
		case 0:
			timers[i] = NewTimer(0)

		case 1:
			<-timer.C // Timer is now idle.

		// Reset to various long durations, which we'll cancel.
		case 2:
			if timer.Reset(1 * Minute) {
				panic("shouldn't be active (1)")
			}
		case 4:
			if timer.Reset(3 * Minute) {
				panic("shouldn't be active (3)")
			}
		case 6:
			if timer.Reset(2 * Minute) {
				panic("shouldn't be active (2)")
			}

		// Stop and drain a long-duration timer.
		case 3, 5, 7:
			if !timer.Stop() {
				t.Logf("timer %d state %d Stop returned false", i, state)
				<-timer.C
			}

		// Start a short-duration timer we expect to select without blocking.
		case 8:
			if timer.Reset(0) {
				t.Fatal("timer.Reset returned true")
			}
		case 9:
			now := Now()
			<-timer.C
			dur := Since(now)
			if dur > 750*Millisecond {
				t.Errorf("timer %d took %v to complete", i, dur)
			}

		// Timer is done. Swap with tail and remove.
		case 10:
			indices[ii] = indices[len(indices)-1]
			indices = indices[:len(indices)-1]
		}
	}
}

// Benchmark timer latency when the thread that creates the timer is busy with
// other work and the timers must be serviced by other threads.
// https://golang.org/issue/38860
func BenchmarkParallelTimerLatency(b *testing.B) {
	gmp := runtime.GOMAXPROCS(0)
	if gmp < 2 || runtime.NumCPU() < gmp {
		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
	}

	// allocate memory now to avoid GC interference later.
	timerCount := gmp - 1
	stats := make([]struct {
		sum   float64
		max   Duration
		count int64
		_     [5]int64 // cache line padding
	}, timerCount)

	// Ensure the time to start new threads to service timers will not pollute
	// the results.
	warmupScheduler(gmp)

	// Note that other than the AfterFunc calls this benchmark is measuring, it
	// avoids using any other timers. In particular, the main goroutine uses
	// doWork to spin for some durations because, up through Go 1.15, if all
	// threads are idle sysmon could leave deep sleep when we wake.

	// Ensure sysmon is in deep sleep.
	doWork(30 * Millisecond)

	b.ResetTimer()

	const delay = Millisecond
	var wg sync.WaitGroup
	var count int32
	for i := 0; i < b.N; i++ {
		wg.Add(timerCount)
		atomic.StoreInt32(&count, 0)
		for j := 0; j < timerCount; j++ {
			j := j // capture the loop variable for the callback below
			expectedWakeup := Now().Add(delay)
			AfterFunc(delay, func() {
				late := Since(expectedWakeup)
				if late < 0 {
					late = 0
				}
				stats[j].count++
				stats[j].sum += float64(late.Nanoseconds())
				if late > stats[j].max {
					stats[j].max = late
				}
				atomic.AddInt32(&count, 1)
				for atomic.LoadInt32(&count) < int32(timerCount) {
					// spin until all timers fired
				}
				wg.Done()
			})
		}

		for atomic.LoadInt32(&count) < int32(timerCount) {
			// spin until all timers fired
		}
		wg.Wait()

		// Spin for a bit to let the other scheduler threads go idle before the
		// next round.
		doWork(Millisecond)
	}

	var total float64
	var samples float64
	max := Duration(0)
	for _, s := range stats {
		if s.max > max {
			max = s.max
		}
		total += s.sum
		samples += float64(s.count)
	}
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(total/samples, "avg-late-ns")
	b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
}

// Benchmark timer latency with staggered wakeup times and varying CPU bound
// workloads. https://golang.org/issue/38860
func BenchmarkStaggeredTickerLatency(b *testing.B) {
	gmp := runtime.GOMAXPROCS(0)
	if gmp < 2 || runtime.NumCPU() < gmp {
		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
	}

	const delay = 3 * Millisecond

	for _, dur := range []Duration{300 * Microsecond, 2 * Millisecond} {
		b.Run(fmt.Sprintf("work-dur=%s", dur), func(b *testing.B) {
			for tickersPerP := 1; tickersPerP < int(delay/dur)+1; tickersPerP++ {
				tickerCount := gmp * tickersPerP
				b.Run(fmt.Sprintf("tickers-per-P=%d", tickersPerP), func(b *testing.B) {
					// allocate memory now to avoid GC interference later.
					stats := make([]struct {
						sum   float64
						max   Duration
						count int64
						_     [5]int64 // cache line padding
					}, tickerCount)

					// Ensure the time to start new threads to service timers
					// will not pollute the results.
					warmupScheduler(gmp)

					b.ResetTimer()

					var wg sync.WaitGroup
					wg.Add(tickerCount)
					for j := 0; j < tickerCount; j++ {
						j := j // capture the loop variable for the goroutine below
						doWork(delay / Duration(gmp))
						expectedWakeup := Now().Add(delay)
						ticker := NewTicker(delay)
						go func(c int, ticker *Ticker, firstWake Time) {
							defer ticker.Stop()

							for ; c > 0; c-- {
								<-ticker.C
								late := Since(expectedWakeup)
								if late < 0 {
									late = 0
								}
								stats[j].count++
								stats[j].sum += float64(late.Nanoseconds())
								if late > stats[j].max {
									stats[j].max = late
								}
								expectedWakeup = expectedWakeup.Add(delay)
							}

							wg.Done()
						}(b.N, ticker, expectedWakeup)
					}
					wg.Wait()

					var total float64
					var samples float64
					max := Duration(0)
					for _, s := range stats {
						if s.max > max {
							max = s.max
						}
						total += s.sum
						samples += float64(s.count)
					}
					b.ReportMetric(0, "ns/op")
					b.ReportMetric(total/samples, "avg-late-ns")
					b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
				})
			}
		})
	}
}

// warmupScheduler ensures the scheduler has at least targetThreadCount threads
// in its thread pool.
func warmupScheduler(targetThreadCount int) {
	var wg sync.WaitGroup
	var count int32
	for i := 0; i < targetThreadCount; i++ {
		wg.Add(1)
		go func() {
			atomic.AddInt32(&count, 1)
			for atomic.LoadInt32(&count) < int32(targetThreadCount) {
				// spin until all threads started
			}

			// spin a bit more to ensure they are all running on separate CPUs.
			doWork(Millisecond)
			wg.Done()
		}()
	}
	wg.Wait()
}

// doWork spins, burning CPU, until dur has elapsed.
func doWork(dur Duration) {
	start := Now()
	for Since(start) < dur {
	}
}