libgo/go/time/sleep_test.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package time_test

import (
	"errors"
	"fmt"
	"internal/testenv"
	"math/rand"
	"runtime"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	. "time"
)

// The Go runtime uses different Windows timers for time.Now and sleeping.
// These can tick at different frequencies and can arrive out of sync.
// The effect can be seen, for example, as time.Sleep(100ms) is actually
// shorter than 100ms when measured as the difference between time.Now before
// and after the time.Sleep call. This was observed on Windows XP SP3 (windows/386).
// windowsInaccuracy is used to ignore such errors.
const windowsInaccuracy = 17 * Millisecond
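
// TestSleep checks that Sleep pauses for at least the requested duration
// (less windowsInaccuracy on Windows), even with the Interrupt test hook
// firing halfway through the sleep.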
func TestSleep(t *testing.T) {
	const delay = 100 * Millisecond
	go func() {
		Sleep(delay / 2)
		Interrupt()
	}()
	start := Now()
	Sleep(delay)
	delayadj := delay
	if runtime.GOOS == "windows" {
		delayadj -= windowsInaccuracy
	}
	duration := Now().Sub(start)
	if duration < delayadj {
		t.Fatalf("Sleep(%s) slept for only %s", delay, duration)
	}
}

// Test the basic function calling behavior. Correct queueing
// behavior is tested elsewhere, since After and AfterFunc share
// the same code.
func TestAfterFunc(t *testing.T) {
	i := 10
	c := make(chan bool)
	var f func()
	f = func() {
		i--
		if i >= 0 {
			AfterFunc(0, f)
			Sleep(1 * Second)
		} else {
			c <- true
		}
	}

	AfterFunc(0, f)
	<-c
}
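
// TestAfterStress runs a tight NewTicker loop while another goroutine
// repeatedly triggers the garbage collector, stressing timer delivery
// under concurrent GC.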
func TestAfterStress(t *testing.T) {
	stop := uint32(0)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
			// Yield so that the OS can wake up the timer thread,
			// so that it can generate channel sends for the main goroutine,
			// which will eventually set stop = 1 for us.
			Sleep(Nanosecond)
		}
	}()
	ticker := NewTicker(1)
	for i := 0; i < 100; i++ {
		<-ticker.C
	}
	ticker.Stop()
	atomic.StoreUint32(&stop, 1)
}
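
// benchmark runs bench in parallel, after first registering a large number
// of long-lived garbage timers on every P so that timer operations are
// measured against non-empty timer heaps. The garbage timers are stopped
// once the benchmark completes.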
func benchmark(b *testing.B, bench func(n int)) {

	// Create equal number of garbage timers on each P before starting
	// the benchmark.
	var wg sync.WaitGroup
	garbageAll := make([][]*Timer, runtime.GOMAXPROCS(0))
	for i := range garbageAll {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			garbage := make([]*Timer, 1<<15)
			for j := range garbage {
				garbage[j] = AfterFunc(Hour, nil)
			}
			garbageAll[i] = garbage
		}(i)
	}
	wg.Wait()

	b.ResetTimer()
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			bench(1000)
		}
	})
	b.StopTimer()

	for _, garbage := range garbageAll {
		for _, t := range garbage {
			t.Stop()
		}
	}
}

func BenchmarkAfterFunc(b *testing.B) {
	benchmark(b, func(n int) {
		c := make(chan bool)
		var f func()
		f = func() {
			n--
			if n >= 0 {
				AfterFunc(0, f)
			} else {
				c <- true
			}
		}

		AfterFunc(0, f)
		<-c
	})
}

func BenchmarkAfter(b *testing.B) {
	benchmark(b, func(n int) {
		for i := 0; i < n; i++ {
			<-After(1)
		}
	})
}

func BenchmarkStop(b *testing.B) {
	benchmark(b, func(n int) {
		for i := 0; i < n; i++ {
			NewTimer(1 * Second).Stop()
		}
	})
}

func BenchmarkSimultaneousAfterFunc(b *testing.B) {
	benchmark(b, func(n int) {
		var wg sync.WaitGroup
		wg.Add(n)
		for i := 0; i < n; i++ {
			AfterFunc(0, wg.Done)
		}
		wg.Wait()
	})
}

func BenchmarkStartStop(b *testing.B) {
	benchmark(b, func(n int) {
		timers := make([]*Timer, n)
		for i := 0; i < n; i++ {
			timers[i] = AfterFunc(Hour, nil)
		}

		for i := 0; i < n; i++ {
			timers[i].Stop()
		}
	})
}

func BenchmarkReset(b *testing.B) {
	benchmark(b, func(n int) {
		t := NewTimer(Hour)
		for i := 0; i < n; i++ {
			t.Reset(Hour)
		}
		t.Stop()
	})
}

func BenchmarkSleep(b *testing.B) {
	benchmark(b, func(n int) {
		var wg sync.WaitGroup
		wg.Add(n)
		for i := 0; i < n; i++ {
			go func() {
				Sleep(Nanosecond)
				wg.Done()
			}()
		}
		wg.Wait()
	})
}
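
// TestAfter checks that After(delay) delivers a time no earlier than
// delay after the call (allowing for windowsInaccuracy on Windows).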
func TestAfter(t *testing.T) {
	const delay = 100 * Millisecond
	start := Now()
	end := <-After(delay)
	delayadj := delay
	if runtime.GOOS == "windows" {
		delayadj -= windowsInaccuracy
	}
	if duration := Now().Sub(start); duration < delayadj {
		t.Fatalf("After(%s) slept for only %d ns", delay, duration)
	}
	if min := start.Add(delayadj); end.Before(min) {
		t.Fatalf("After(%s) expect >= %s, got %s", delay, min, end)
	}
}

func TestAfterTick(t *testing.T) {
	const Count = 10
	Delta := 100 * Millisecond
	if testing.Short() {
		Delta = 10 * Millisecond
	}
	t0 := Now()
	for i := 0; i < Count; i++ {
		<-After(Delta)
	}
	t1 := Now()
	d := t1.Sub(t0)
	target := Delta * Count
	if d < target*9/10 {
		t.Fatalf("%d ticks of %s too fast: took %s, expected %s", Count, Delta, d, target)
	}
	if !testing.Short() && d > target*30/10 {
		t.Fatalf("%d ticks of %s too slow: took %s, expected %s", Count, Delta, d, target)
	}
}

func TestAfterStop(t *testing.T) {
	// We want to test that we stop a timer before it runs.
	// We also want to test that it didn't run after a longer timer.
	// Since we don't want the test to run for too long, we don't
	// want to use lengthy times. That makes the test inherently flaky.
	// So only report an error if it fails five times in a row.

	var errs []string
	logErrs := func() {
		for _, e := range errs {
			t.Log(e)
		}
	}

	for i := 0; i < 5; i++ {
		AfterFunc(100*Millisecond, func() {})
		t0 := NewTimer(50 * Millisecond)
		c1 := make(chan bool, 1)
		t1 := AfterFunc(150*Millisecond, func() { c1 <- true })
		c2 := After(200 * Millisecond)
		if !t0.Stop() {
			errs = append(errs, "failed to stop event 0")
			continue
		}
		if !t1.Stop() {
			errs = append(errs, "failed to stop event 1")
			continue
		}
		<-c2
		select {
		case <-t0.C:
			errs = append(errs, "event 0 was not stopped")
			continue
		case <-c1:
			errs = append(errs, "event 1 was not stopped")
			continue
		default:
		}
		if t1.Stop() {
			errs = append(errs, "Stop returned true twice")
			continue
		}

		// Test passed, so all done.
		if len(errs) > 0 {
			t.Logf("saw %d errors, ignoring to avoid flakiness", len(errs))
			logErrs()
		}

		return
	}

	t.Errorf("saw %d errors", len(errs))
	logErrs()
}

func TestAfterQueuing(t *testing.T) {
	// This test flakes out on some systems,
	// so we'll try it a few times before declaring it a failure.
	const attempts = 5
	err := errors.New("!=nil")
	for i := 0; i < attempts && err != nil; i++ {
		delta := Duration(20+i*50) * Millisecond
		if err = testAfterQueuing(delta); err != nil {
			t.Logf("attempt %v failed: %v", i, err)
		}
	}
	if err != nil {
		t.Fatal(err)
	}
}

// For gccgo omit 0 for now because it can take too long to start the
var slots = []int{5, 3, 6, 6, 6, 1, 1, 2, 7, 9, 4, 8 /*0*/}

type afterResult struct {
	slot int
	t    Time
}
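
// await receives a time from ac and reports it on result together with
// the slot that produced it.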
func await(slot int, result chan<- afterResult, ac <-chan Time) {
	result <- afterResult{slot, <-ac}
}

func testAfterQueuing(delta Duration) error {
	// make the result channel buffered because we don't want
	// to depend on channel queueing semantics that might
	// possibly change in the future.
	result := make(chan afterResult, len(slots))

	t0 := Now()
	for _, slot := range slots {
		go await(slot, result, After(Duration(slot)*delta))
	}
	var order []int
	var times []Time
	for range slots {
		r := <-result
		order = append(order, r.slot)
		times = append(times, r.t)
	}
	for i := range order {
		if i > 0 && order[i] < order[i-1] {
			return fmt.Errorf("After calls returned out of order: %v", order)
		}
	}
	for i, t := range times {
		dt := t.Sub(t0)
		target := Duration(order[i]) * delta
		if dt < target-delta/2 || dt > target+delta*10 {
			return fmt.Errorf("After(%s) arrived at %s, expected [%s,%s]", target, dt, target-delta/2, target+delta*10)
		}
	}
	return nil
}
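
// TestTimerStopStress starts many AfterFunc timers and stops each of them
// well before it is due to fire; any timer that still fires reports an error.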
func TestTimerStopStress(t *testing.T) {
	if testing.Short() {
		return
	}
	for i := 0; i < 100; i++ {
		go func(i int) {
			timer := AfterFunc(2*Second, func() {
				t.Errorf("timer %d was not stopped", i)
			})
			Sleep(1 * Second)
			timer.Stop()
		}(i)
	}
	Sleep(3 * Second)
}

func TestSleepZeroDeadlock(t *testing.T) {
	// Sleep(0) used to hang. The sequence of events was as follows:
	// Sleep(0) sets G's status to Gwaiting, but then immediately returns leaving the status.
	// Then the goroutine calls e.g. new and falls down into the scheduler due to pending GC.
	// After the GC nobody wakes up the goroutine from Gwaiting status.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	c := make(chan bool)
	go func() {
		for i := 0; i < 100; i++ {
			runtime.GC()
		}
		c <- true
	}()
	for i := 0; i < 100; i++ {
		Sleep(0)
		tmp := make(chan bool, 1)
		tmp <- true
		<-tmp
	}
	<-c
}
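
// testReset exercises Timer.Reset at scale d: resetting a still-active timer
// must return true and delay the firing, while resetting an already expired
// timer must return false.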
func testReset(d Duration) error {
	t0 := NewTimer(2 * d)
	Sleep(d)
	if !t0.Reset(3 * d) {
		return errors.New("resetting unfired timer returned false")
	}
	Sleep(2 * d)
	select {
	case <-t0.C:
		return errors.New("timer fired early")
	default:
	}
	Sleep(2 * d)
	select {
	case <-t0.C:
	default:
		return errors.New("reset timer did not fire")
	}

	if t0.Reset(50 * Millisecond) {
		return errors.New("resetting expired timer returned true")
	}
	return nil
}

func TestReset(t *testing.T) {
	// We try to run this test with increasingly larger multiples
	// until one works, so that slow, loaded hardware isn't as flaky,
	// but without slowing down fast machines unnecessarily.
	const unit = 25 * Millisecond
	tries := []Duration{
		1 * unit,
		3 * unit,
		7 * unit,
		15 * unit,
	}
	var err error
	for _, d := range tries {
		err = testReset(d)
		if err == nil {
			t.Logf("passed using duration %v", d)
			return
		}
	}
	t.Error(err)
}

// Test that sleeping (via Sleep or Timer) for an interval so large it
// overflows does not result in a short sleep duration. Nor does it interfere
// with execution of other timers. If it does, timers in this or subsequent
// tests may not fire.
func TestOverflowSleep(t *testing.T) {
	const big = Duration(int64(1<<63 - 1))

	go func() {
		Sleep(big)
		// On failure, this may return after the test has completed, so
		// we need to panic instead.
		panic("big sleep returned")
	}()

	select {
	case <-After(big):
		t.Fatalf("big timeout fired")
	case <-After(25 * Millisecond):
		// OK
	}

	const neg = Duration(-1 << 63)
	Sleep(neg) // Returns immediately.
	select {
	case <-After(neg):
		// OK
	case <-After(1 * Second):
		t.Fatalf("negative timeout didn't fire")
	}
}

// Test that a panic while deleting a timer does not leave
// the timers mutex held, deadlocking a ticker.Stop in a defer.
func TestIssue5745(t *testing.T) {
	ticker := NewTicker(Hour)
	defer func() {
		// would deadlock here before the fix due to
		// lock taken before the segfault.
		ticker.Stop()

		if r := recover(); r == nil {
			t.Error("Expected panic, but none happened.")
		}
	}()

	// cause a panic due to a segfault
	var timer *Timer
	timer.Stop()
	t.Error("Should be unreachable.")
}

func TestOverflowPeriodRuntimeTimer(t *testing.T) {
	// This may hang forever if timers are broken. See comment near
	// the end of CheckRuntimeTimerOverflow in internal_test.go.
	CheckRuntimeTimerPeriodOverflow()
}
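
// checkZeroPanicString is deferred by the zero-Timer tests below to verify
// that the recovered panic message mentions an uninitialized Timer.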
func checkZeroPanicString(t *testing.T) {
	e := recover()
	s, _ := e.(string)
	if want := "called on uninitialized Timer"; !strings.Contains(s, want) {
		t.Errorf("panic = %v; want substring %q", e, want)
	}
}

func TestZeroTimerResetPanics(t *testing.T) {
	defer checkZeroPanicString(t)
	var tr Timer
	tr.Reset(1)
}

func TestZeroTimerStopPanics(t *testing.T) {
	defer checkZeroPanicString(t)
	var tr Timer
	tr.Stop()
}

// Test that zero duration timers aren't missed by the scheduler. Regression test for issue 44868.
func TestZeroTimer(t *testing.T) {
	if testing.Short() {
		t.Skip("-short")
	}

	for i := 0; i < 1000000; i++ {
		s := Now()
		ti := NewTimer(0)
		<-ti.C
		if diff := Since(s); diff > 2*Second {
			t.Errorf("Expected time to get value from Timer channel in less than 2 sec, took %v", diff)
		}
	}
}

// Test that rapidly moving a timer earlier doesn't cause it to get dropped.
// Issue 47329.
func TestTimerModifiedEarlier(t *testing.T) {
	if runtime.GOOS == "plan9" && runtime.GOARCH == "arm" {
		testenv.SkipFlaky(t, 50470)
	}

	past := Until(Unix(0, 0))
	count := 1000
	fail := 0
	for i := 0; i < count; i++ {
		timer := NewTimer(Hour)
		for j := 0; j < 10; j++ {
			if !timer.Stop() {
				<-timer.C
			}
			timer.Reset(past)
		}

		deadline := NewTimer(10 * Second)
		defer deadline.Stop()
		now := Now()
		select {
		case <-timer.C:
			if since := Since(now); since > 8*Second {
				t.Errorf("timer took too long (%v)", since)
				fail++
			}
		case <-deadline.C:
			t.Error("deadline expired")
		}
	}

	if fail > 0 {
		t.Errorf("%d failures", fail)
	}
}

// Test that rapidly moving timers earlier and later doesn't cause
// some of the sleep times to be lost.
// Issue 47762
func TestAdjustTimers(t *testing.T) {
	var rnd = rand.New(rand.NewSource(Now().UnixNano()))

	timers := make([]*Timer, 100)
	states := make([]int, len(timers))
	indices := rnd.Perm(len(timers))

	for len(indices) != 0 {
		var ii = rnd.Intn(len(indices))
		var i = indices[ii]

		var timer = timers[i]
		var state = states[i]
		states[i]++

		switch state {
		case 0:
			timers[i] = NewTimer(0)
		case 1:
			<-timer.C // Timer is now idle.

		// Reset to various long durations, which we'll cancel.
		case 2:
			if timer.Reset(1 * Minute) {
				panic("shouldn't be active (1)")
			}
		case 4:
			if timer.Reset(3 * Minute) {
				panic("shouldn't be active (3)")
			}
		case 6:
			if timer.Reset(2 * Minute) {
				panic("shouldn't be active (2)")
			}

		// Stop and drain a long-duration timer.
		case 3, 5, 7:
			if !timer.Stop() {
				t.Logf("timer %d state %d Stop returned false", i, state)
				<-timer.C
			}

		// Start a short-duration timer we expect to select without blocking.
		case 8:
			if timer.Reset(0) {
				t.Fatal("timer.Reset returned true")
			}
		case 9:
			now := Now()
			<-timer.C
			dur := Since(now)
			if dur > 750*Millisecond {
				t.Errorf("timer %d took %v to complete", i, dur)
			}

		// Timer is done. Swap with tail and remove.
		case 10:
			indices[ii] = indices[len(indices)-1]
			indices = indices[:len(indices)-1]
		}
	}
}

// Benchmark timer latency when the thread that creates the timer is busy with
// other work and the timers must be serviced by other threads.
// https://golang.org/issue/38860
func BenchmarkParallelTimerLatency(b *testing.B) {
	gmp := runtime.GOMAXPROCS(0)
	if gmp < 2 || runtime.NumCPU() < gmp {
		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
	}

	// allocate memory now to avoid GC interference later.
	timerCount := gmp - 1
	stats := make([]struct {
		sum   float64
		max   Duration
		count int64
		_     [5]int64 // cache line padding
	}, timerCount)

	// Ensure the time to start new threads to service timers will not pollute
	// the results.
	warmupScheduler(gmp)

	// Note that, other than the AfterFunc calls this benchmark is measuring, it
	// avoids using any other timers. In particular, the main goroutine uses
	// doWork to spin for some durations because up through Go 1.15 if all
	// threads are idle sysmon could leave deep sleep when we wake.

	// Ensure sysmon is in deep sleep.
	doWork(30 * Millisecond)

	b.ResetTimer()

	const delay = Millisecond
	var wg sync.WaitGroup
	var count int32
	for i := 0; i < b.N; i++ {
		wg.Add(timerCount)
		atomic.StoreInt32(&count, 0)
		for j := 0; j < timerCount; j++ {
			j := j
			expectedWakeup := Now().Add(delay)
			AfterFunc(delay, func() {
				late := Since(expectedWakeup)
				if late < 0 {
					late = 0
				}
				stats[j].count++
				stats[j].sum += float64(late.Nanoseconds())
				if late > stats[j].max {
					stats[j].max = late
				}
				atomic.AddInt32(&count, 1)
				for atomic.LoadInt32(&count) < int32(timerCount) {
					// spin until all timers fired
				}
				wg.Done()
			})
		}

		for atomic.LoadInt32(&count) < int32(timerCount) {
			// spin until all timers fired
		}
		wg.Wait()

		// Spin for a bit to let the other scheduler threads go idle before the
		// next round.
		doWork(Millisecond)
	}
	var total float64
	var samples float64
	max := Duration(0)
	for _, s := range stats {
		if s.max > max {
			max = s.max
		}
		total += s.sum
		samples += float64(s.count)
	}
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(total/samples, "avg-late-ns")
	b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
}

// Benchmark timer latency with staggered wakeup times and varying CPU bound
// workloads. https://golang.org/issue/38860
func BenchmarkStaggeredTickerLatency(b *testing.B) {
	gmp := runtime.GOMAXPROCS(0)
	if gmp < 2 || runtime.NumCPU() < gmp {
		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
	}

	const delay = 3 * Millisecond

	for _, dur := range []Duration{300 * Microsecond, 2 * Millisecond} {
		b.Run(fmt.Sprintf("work-dur=%s", dur), func(b *testing.B) {
			for tickersPerP := 1; tickersPerP < int(delay/dur)+1; tickersPerP++ {
				tickerCount := gmp * tickersPerP
				b.Run(fmt.Sprintf("tickers-per-P=%d", tickersPerP), func(b *testing.B) {
					// allocate memory now to avoid GC interference later.
					stats := make([]struct {
						sum   float64
						max   Duration
						count int64
						_     [5]int64 // cache line padding
					}, tickerCount)

					// Ensure the time to start new threads to service timers
					// will not pollute the results.
					warmupScheduler(gmp)

					b.ResetTimer()

					var wg sync.WaitGroup
					wg.Add(tickerCount)
					for j := 0; j < tickerCount; j++ {
						j := j
						doWork(delay / Duration(gmp))
						expectedWakeup := Now().Add(delay)
						ticker := NewTicker(delay)
						go func(c int, ticker *Ticker, firstWake Time) {
							defer ticker.Stop()

							for ; c > 0; c-- {
								<-ticker.C
								late := Since(expectedWakeup)
								if late < 0 {
									late = 0
								}
								stats[j].count++
								stats[j].sum += float64(late.Nanoseconds())
								if late > stats[j].max {
									stats[j].max = late
								}
								expectedWakeup = expectedWakeup.Add(delay)
								doWork(dur)
							}
							wg.Done()
						}(b.N, ticker, expectedWakeup)
					}
					wg.Wait()

					var total float64
					var samples float64
					max := Duration(0)
					for _, s := range stats {
						if s.max > max {
							max = s.max
						}
						total += s.sum
						samples += float64(s.count)
					}
					b.ReportMetric(0, "ns/op")
					b.ReportMetric(total/samples, "avg-late-ns")
					b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
				})
			}
		})
	}
}

// warmupScheduler ensures the scheduler has at least targetThreadCount threads
// in its thread pool.
func warmupScheduler(targetThreadCount int) {
	var wg sync.WaitGroup
	var count int32
	for i := 0; i < targetThreadCount; i++ {
		wg.Add(1)
		go func() {
			atomic.AddInt32(&count, 1)
			for atomic.LoadInt32(&count) < int32(targetThreadCount) {
				// spin until all threads started
			}

			// spin a bit more to ensure they are all running on separate CPUs.
			doWork(Millisecond)
			wg.Done()
		}()
	}
	wg.Wait()
}
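
// doWork spins, burning CPU until dur has elapsed, without using any timers.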
func doWork(dur Duration) {
	start := Now()
	for Since(start) < dur {
	}
}