author     Cherry Zhang <cherryyz@google.com>  2020-10-28 09:12:20 -0400
committer  Cherry Zhang <cherryyz@google.com>  2020-10-28 09:12:20 -0400
commit     a16e30d162c1c7408db7821e7b9513cefa09c6ca (patch)
tree       af752ba9ba44c547df39bb0af9bff79f610ba9d5 /src/time/sleep_test.go
parent     91e4d2d57bc341dd82c98247117114c851380aef (diff)
parent     cf6cfba4d5358404dd890f6025e573a4b2156543 (diff)
[dev.link] all: merge branch 'master' into dev.link
Clean merge.
Change-Id: Ia7b2808bc649790198d34c226a61d9e569084dc5
Diffstat (limited to 'src/time/sleep_test.go')
-rw-r--r--  src/time/sleep_test.go | 187
1 file changed, 187 insertions(+), 0 deletions(-)
diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go
index f5678020b9..ba0016bf49 100644
--- a/src/time/sleep_test.go
+++ b/src/time/sleep_test.go
@@ -501,3 +501,190 @@ func TestZeroTimerStopPanics(t *testing.T) {
 	var tr Timer
 	tr.Stop()
 }
+
+// Benchmark timer latency when the thread that creates the timer is busy with
+// other work and the timers must be serviced by other threads.
+// https://golang.org/issue/38860
+func BenchmarkParallelTimerLatency(b *testing.B) {
+	gmp := runtime.GOMAXPROCS(0)
+	if gmp < 2 || runtime.NumCPU() < gmp {
+		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
+	}
+
+	// allocate memory now to avoid GC interference later.
+	timerCount := gmp - 1
+	stats := make([]struct {
+		sum   float64
+		max   Duration
+		count int64
+		_     [5]int64 // cache line padding
+	}, timerCount)
+
+	// Ensure the time to start new threads to service timers will not pollute
+	// the results.
+	warmupScheduler(gmp)
+
+	// Note that other than the AfterFunc calls this benchmark is measuring it
+	// avoids using any other timers. In particular, the main goroutine uses
+	// doWork to spin for some durations because up through Go 1.15 if all
+	// threads are idle sysmon could leave deep sleep when we wake.
+
+	// Ensure sysmon is in deep sleep.
+	doWork(30 * Millisecond)
+
+	b.ResetTimer()
+
+	const delay = Millisecond
+	var wg sync.WaitGroup
+	var count int32
+	for i := 0; i < b.N; i++ {
+		wg.Add(timerCount)
+		atomic.StoreInt32(&count, 0)
+		for j := 0; j < timerCount; j++ {
+			j := j
+			expectedWakeup := Now().Add(delay)
+			AfterFunc(delay, func() {
+				late := Since(expectedWakeup)
+				if late < 0 {
+					late = 0
+				}
+				stats[j].count++
+				stats[j].sum += float64(late.Nanoseconds())
+				if late > stats[j].max {
+					stats[j].max = late
+				}
+				atomic.AddInt32(&count, 1)
+				for atomic.LoadInt32(&count) < int32(timerCount) {
+					// spin until all timers fired
+				}
+				wg.Done()
+			})
+		}
+
+		for atomic.LoadInt32(&count) < int32(timerCount) {
+			// spin until all timers fired
+		}
+		wg.Wait()
+
+		// Spin for a bit to let the other scheduler threads go idle before the
+		// next round.
+		doWork(Millisecond)
+	}
+	var total float64
+	var samples float64
+	max := Duration(0)
+	for _, s := range stats {
+		if s.max > max {
+			max = s.max
+		}
+		total += s.sum
+		samples += float64(s.count)
+	}
+	b.ReportMetric(0, "ns/op")
+	b.ReportMetric(total/samples, "avg-late-ns")
+	b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
+}
+
+// Benchmark timer latency with staggered wakeup times and varying CPU bound
+// workloads. https://golang.org/issue/38860
+func BenchmarkStaggeredTickerLatency(b *testing.B) {
+	gmp := runtime.GOMAXPROCS(0)
+	if gmp < 2 || runtime.NumCPU() < gmp {
+		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
+	}
+
+	const delay = 3 * Millisecond
+
+	for _, dur := range []Duration{300 * Microsecond, 2 * Millisecond} {
+		b.Run(fmt.Sprintf("work-dur=%s", dur), func(b *testing.B) {
+			for tickersPerP := 1; tickersPerP < int(delay/dur)+1; tickersPerP++ {
+				tickerCount := gmp * tickersPerP
+				b.Run(fmt.Sprintf("tickers-per-P=%d", tickersPerP), func(b *testing.B) {
+					// allocate memory now to avoid GC interference later.
+					stats := make([]struct {
+						sum   float64
+						max   Duration
+						count int64
+						_     [5]int64 // cache line padding
+					}, tickerCount)
+
+					// Ensure the time to start new threads to service timers
+					// will not pollute the results.
+					warmupScheduler(gmp)
+
+					b.ResetTimer()
+
+					var wg sync.WaitGroup
+					wg.Add(tickerCount)
+					for j := 0; j < tickerCount; j++ {
+						j := j
+						doWork(delay / Duration(gmp))
+						expectedWakeup := Now().Add(delay)
+						ticker := NewTicker(delay)
+						go func(c int, ticker *Ticker, firstWake Time) {
+							defer ticker.Stop()
+
+							for ; c > 0; c-- {
+								<-ticker.C
+								late := Since(expectedWakeup)
+								if late < 0 {
+									late = 0
+								}
+								stats[j].count++
+								stats[j].sum += float64(late.Nanoseconds())
+								if late > stats[j].max {
+									stats[j].max = late
+								}
+								expectedWakeup = expectedWakeup.Add(delay)
+								doWork(dur)
+							}
+							wg.Done()
+						}(b.N, ticker, expectedWakeup)
+					}
+					wg.Wait()
+
+					var total float64
+					var samples float64
+					max := Duration(0)
+					for _, s := range stats {
+						if s.max > max {
+							max = s.max
+						}
+						total += s.sum
+						samples += float64(s.count)
+					}
+					b.ReportMetric(0, "ns/op")
+					b.ReportMetric(total/samples, "avg-late-ns")
+					b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
+				})
+			}
+		})
+	}
+}
+
+// warmupScheduler ensures the scheduler has at least targetThreadCount threads
+// in its thread pool.
+func warmupScheduler(targetThreadCount int) {
+	var wg sync.WaitGroup
+	var count int32
+	for i := 0; i < targetThreadCount; i++ {
+		wg.Add(1)
+		go func() {
+			atomic.AddInt32(&count, 1)
+			for atomic.LoadInt32(&count) < int32(targetThreadCount) {
+				// spin until all threads started
+			}
+
+			// spin a bit more to ensure they are all running on separate CPUs.
+			doWork(Millisecond)
+			wg.Done()
+		}()
+	}
+	wg.Wait()
+}
+
+func doWork(dur Duration) {
+	start := Now()
+	for Since(start) < dur {
+	}
+}
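
A note on the `_ [5]int64 // cache line padding` fields in the stats structs above: on 64-bit platforms the three 8-byte fields plus the 40-byte pad fill a full 64-byte cache line, so goroutines updating adjacent slice elements do not invalidate each other's cache line on every write. Below is a minimal standalone sketch of that false-sharing idiom, assuming 64-byte cache lines; the paddedCounter type and the loop counts are illustrative, not part of the commit.

package main

import (
	"fmt"
	"sync"
)

// paddedCounter mirrors the padding idiom in the stats structs: one 8-byte
// counter plus 56 bytes of padding fills a 64-byte cache line, so adjacent
// slice elements land on separate lines.
type paddedCounter struct {
	n int64
	_ [7]int64 // assumption: 64-byte cache lines, as on most x86-64 CPUs
}

func main() {
	counters := make([]paddedCounter, 4)
	var wg sync.WaitGroup
	for i := range counters {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			for j := 0; j < 1_000_000; j++ {
				counters[i].n++ // each goroutine writes only its own element
			}
		}(i)
	}
	wg.Wait()
	fmt.Println(counters[0].n, counters[1].n)
}

Without the pad field, several counters would share a cache line and every increment would contend with its neighbors; with it, each goroutine's writes stay local. The benchmarks themselves report through b.ReportMetric (avg-late-ns, max-late-ns) rather than the default ns/op, and can be run from src/time with an ordinary go test -bench invocation (exact flags not specified by the commit).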