Revert "[PATCH 7/7] RISC-V: Disable by pieces for vector setmem length > UNITS_PER_WORD"
// libgo/go/runtime/mgcpacer_test.go

// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"internal/goexperiment"
	"math"
	"math/rand"
	. "runtime"
	"testing"
	"time"
)
func TestGcPacer(t *testing.T) {
	t.Parallel()

	const initialHeapBytes = 256 << 10
	for _, e := range []*gcExecTest{
		{
			// The most basic test case: a steady-state heap.
			// Growth to an O(MiB) heap, then constant heap size, alloc/scan rates.
			name:          "Steady",
			gcPercent:     100,
			globalsBytes:  32 << 10,
			nCores:        8,
			allocRate:     constant(33.0),
			scanRate:      constant(1024.0),
			growthRate:    constant(2.0).sum(ramp(-1.0, 12)),
			scannableFrac: constant(1.0),
			stackBytes:    constant(8192),
			length:        50,
			checker: func(t *testing.T, c []gcCycleResult) {
				n := len(c)
				if n >= 25 {
					if goexperiment.PacerRedesign {
						// For the pacer redesign, assert something even stronger: at this alloc/scan rate,
						// it should be extremely close to the goal utilization.
						assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, GCGoalUtilization, 0.005)
					}

					// Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles.
					assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005)
					assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05)
				}
			},
		},
		{
			// Same as the steady-state case, but lots of stacks to scan relative to the heap size.
			name:          "SteadyBigStacks",
			gcPercent:     100,
			globalsBytes:  32 << 10,
			nCores:        8,
			allocRate:     constant(132.0),
			scanRate:      constant(1024.0),
			growthRate:    constant(2.0).sum(ramp(-1.0, 12)),
			scannableFrac: constant(1.0),
			stackBytes:    constant(2048).sum(ramp(128<<20, 8)),
			length:        50,
			checker: func(t *testing.T, c []gcCycleResult) {
				// Check the same conditions as the steady-state case, except the old pacer can't
				// really handle this well, so don't check the goal ratio for it.
				n := len(c)
				if n >= 25 {
					if goexperiment.PacerRedesign {
						// For the pacer redesign, assert something even stronger: at this alloc/scan rate,
						// it should be extremely close to the goal utilization.
						assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, GCGoalUtilization, 0.005)
						assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05)
					}

					// Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles.
					assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005)
				}
			},
		},
		{
			// Same as the steady-state case, but lots of globals to scan relative to the heap size.
			name:          "SteadyBigGlobals",
			gcPercent:     100,
			globalsBytes:  128 << 20,
			nCores:        8,
			allocRate:     constant(132.0),
			scanRate:      constant(1024.0),
			growthRate:    constant(2.0).sum(ramp(-1.0, 12)),
			scannableFrac: constant(1.0),
			stackBytes:    constant(8192),
			length:        50,
			checker: func(t *testing.T, c []gcCycleResult) {
				// Check the same conditions as the steady-state case, except the old pacer can't
				// really handle this well, so don't check the goal ratio for it.
				n := len(c)
				if n >= 25 {
					if goexperiment.PacerRedesign {
						// For the pacer redesign, assert something even stronger: at this alloc/scan rate,
						// it should be extremely close to the goal utilization.
						assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, GCGoalUtilization, 0.005)
						assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05)
					}

					// Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles.
					assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005)
				}
			},
		},
		{
			// This tests the GC pacer's response to a small change in allocation rate.
			name:          "StepAlloc",
			gcPercent:     100,
			globalsBytes:  32 << 10,
			nCores:        8,
			allocRate:     constant(33.0).sum(ramp(66.0, 1).delay(50)),
			scanRate:      constant(1024.0),
			growthRate:    constant(2.0).sum(ramp(-1.0, 12)),
			scannableFrac: constant(1.0),
			stackBytes:    constant(8192),
			length:        100,
			checker: func(t *testing.T, c []gcCycleResult) {
				n := len(c)
				if (n >= 25 && n < 50) || n >= 75 {
					// Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles
					// and then is able to settle again after a significant jump in allocation rate.
					assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005)
					assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05)
				}
			},
		},
		{
			// This tests the GC pacer's response to a large change in allocation rate.
			name:          "HeavyStepAlloc",
			gcPercent:     100,
			globalsBytes:  32 << 10,
			nCores:        8,
			allocRate:     constant(33).sum(ramp(330, 1).delay(50)),
			scanRate:      constant(1024.0),
			growthRate:    constant(2.0).sum(ramp(-1.0, 12)),
			scannableFrac: constant(1.0),
			stackBytes:    constant(8192),
			length:        100,
			checker: func(t *testing.T, c []gcCycleResult) {
				n := len(c)
				if (n >= 25 && n < 50) || n >= 75 {
					// Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles
					// and then is able to settle again after a significant jump in allocation rate.
					assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005)
					assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05)
				}
			},
		},
		{
			// This tests the GC pacer's response to a change in the fraction of the scannable heap.
			name:          "StepScannableFrac",
			gcPercent:     100,
			globalsBytes:  32 << 10,
			nCores:        8,
			allocRate:     constant(128.0),
			scanRate:      constant(1024.0),
			growthRate:    constant(2.0).sum(ramp(-1.0, 12)),
			scannableFrac: constant(0.2).sum(unit(0.5).delay(50)),
			stackBytes:    constant(8192),
			length:        100,
			checker: func(t *testing.T, c []gcCycleResult) {
				n := len(c)
				if (n >= 25 && n < 50) || n >= 75 {
					// Make sure the pacer settles into a non-degenerate state in at least 25 GC cycles
					// and then is able to settle again after a significant jump in the scannable heap fraction.
					assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.005)
					assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05)
				}
			},
		},
		{
			// Tests the pacer for a high GOGC value with a large heap growth happening
			// in the middle. The purpose of the large heap growth is to check whether GC
			// utilization ends up sensitive to it.
			name:          "HighGOGC",
			gcPercent:     1500,
			globalsBytes:  32 << 10,
			nCores:        8,
			allocRate:     random(7, 0x53).offset(165),
			scanRate:      constant(1024.0),
			growthRate:    constant(2.0).sum(ramp(-1.0, 12), random(0.01, 0x1), unit(14).delay(25)),
			scannableFrac: constant(1.0),
			stackBytes:    constant(8192),
			length:        50,
			checker: func(t *testing.T, c []gcCycleResult) {
				n := len(c)
				if goexperiment.PacerRedesign && n > 12 {
					if n == 26 {
						// In the 26th cycle there's a heap growth. Overshoot is expected to maintain
						// a stable utilization, but we should *never* overshoot more than GOGC of
						// the next cycle.
						assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.90, 15)
					} else {
						// Give a wider goal range here. With such a high GOGC value we're going to be
						// forced to undershoot.
						//
						// TODO(mknyszek): Instead of placing a 0.95 limit on the trigger, make the limit
						// based on absolute bytes, similar to how the minimum heap size is determined.
						assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.90, 1.05)
					}

					// Ensure utilization remains stable despite a growth in live heap size
					// at GC #25. This test fails prior to the GC pacer redesign.
					//
					// Because GOGC is so large, we should also be really close to the goal utilization.
					assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, GCGoalUtilization, GCGoalUtilization+0.03)
					assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.03)
				}
			},
		},
		{
			// This test makes sure that in the face of a varying (in this case, oscillating) allocation
			// rate, the pacer does a reasonably good job of staying abreast of the changes.
			name:          "OscAlloc",
			gcPercent:     100,
			globalsBytes:  32 << 10,
			nCores:        8,
			allocRate:     oscillate(13, 0, 8).offset(67),
			scanRate:      constant(1024.0),
			growthRate:    constant(2.0).sum(ramp(-1.0, 12)),
			scannableFrac: constant(1.0),
			stackBytes:    constant(8192),
			length:        50,
			checker: func(t *testing.T, c []gcCycleResult) {
				n := len(c)
				if n > 12 {
					// After the 12th GC, the heap will stop growing. Now, just make sure that:
					// 1. Utilization isn't varying _too_ much, and
					// 2. The pacer is mostly keeping up with the goal.
					assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05)
					if goexperiment.PacerRedesign {
						assertInRange(t, "GC utilization", c[n-1].gcUtilization, 0.25, 0.3)
					} else {
						// The old pacer is messier here, and needs a lot more tolerance.
						assertInRange(t, "GC utilization", c[n-1].gcUtilization, 0.25, 0.4)
					}
				}
			},
		},
		{
			// This test is the same as OscAlloc, but instead of oscillating, the allocation rate is jittery.
			name:          "JitterAlloc",
			gcPercent:     100,
			globalsBytes:  32 << 10,
			nCores:        8,
			allocRate:     random(13, 0xf).offset(132),
			scanRate:      constant(1024.0),
			growthRate:    constant(2.0).sum(ramp(-1.0, 12), random(0.01, 0xe)),
			scannableFrac: constant(1.0),
			stackBytes:    constant(8192),
			length:        50,
			checker: func(t *testing.T, c []gcCycleResult) {
				n := len(c)
				if n > 12 {
					// After the 12th GC, the heap will stop growing. Now, just make sure that:
					// 1. Utilization isn't varying _too_ much, and
					// 2. The pacer is mostly keeping up with the goal.
					assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05)
					if goexperiment.PacerRedesign {
						assertInRange(t, "GC utilization", c[n-1].gcUtilization, 0.25, 0.3)
					} else {
						// The old pacer is messier here, and needs a lot more tolerance.
						assertInRange(t, "GC utilization", c[n-1].gcUtilization, 0.25, 0.4)
					}
				}
			},
		},
		{
			// This test is the same as JitterAlloc, but with a much higher allocation rate.
			// The jitter is proportionally the same.
			name:          "HeavyJitterAlloc",
			gcPercent:     100,
			globalsBytes:  32 << 10,
			nCores:        8,
			allocRate:     random(33.0, 0x0).offset(330),
			scanRate:      constant(1024.0),
			growthRate:    constant(2.0).sum(ramp(-1.0, 12), random(0.01, 0x152)),
			scannableFrac: constant(1.0),
			stackBytes:    constant(8192),
			length:        50,
			checker: func(t *testing.T, c []gcCycleResult) {
				n := len(c)
				if n > 13 {
					// After the 12th GC, the heap will stop growing. Now, just make sure that:
					// 1. Utilization isn't varying _too_ much, and
					// 2. The pacer is mostly keeping up with the goal.
					// We start at the 13th here because we want to use the 12th as a reference.
					assertInRange(t, "goal ratio", c[n-1].goalRatio(), 0.95, 1.05)
					// Unlike the other tests, GC utilization here will vary more and tend higher.
					// Just make sure it's not going too crazy.
					assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[n-2].gcUtilization, 0.05)
					if goexperiment.PacerRedesign {
						assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[11].gcUtilization, 0.05)
					} else {
						// The old pacer is messier here, and needs a little more tolerance.
						assertInEpsilon(t, "GC utilization", c[n-1].gcUtilization, c[11].gcUtilization, 0.07)
					}
				}
			},
		},
		// TODO(mknyszek): Write a test that exercises the pacer's hard goal.
		// This is difficult in the idealized model this testing framework places
		// the pacer in, because the calculated overshoot is directly proportional
		// to the runway for the case of the expected work.
		// However, it is still possible to trigger this case if something exceptional
		// happens between calls to revise; the framework just doesn't support this yet.
	} {
		e := e
		t.Run(e.name, func(t *testing.T) {
			t.Parallel()

			c := NewGCController(e.gcPercent)
			var bytesAllocatedBlackLast int64
			results := make([]gcCycleResult, 0, e.length)
			for i := 0; i < e.length; i++ {
				cycle := e.next()
				c.StartCycle(cycle.stackBytes, e.globalsBytes, cycle.scannableFrac, e.nCores)

				// Update pacer incrementally as we complete scan work.
				const (
					revisePeriod = 500 * time.Microsecond
					rateConv     = 1024 * float64(revisePeriod) / float64(time.Millisecond)
				)
				var nextHeapMarked int64
				if i == 0 {
					nextHeapMarked = initialHeapBytes
				} else {
					nextHeapMarked = int64(float64(int64(c.HeapMarked())-bytesAllocatedBlackLast) * cycle.growthRate)
				}
				globalsScanWorkLeft := int64(e.globalsBytes)
				stackScanWorkLeft := int64(cycle.stackBytes)
				heapScanWorkLeft := int64(float64(nextHeapMarked) * cycle.scannableFrac)
				doWork := func(work int64) (int64, int64, int64) {
					var deltas [3]int64

					// Do globals work first, then stacks, then heap.
					for i, workLeft := range []*int64{&globalsScanWorkLeft, &stackScanWorkLeft, &heapScanWorkLeft} {
						if *workLeft == 0 {
							continue
						}
						if *workLeft > work {
							deltas[i] += work
							*workLeft -= work
							work = 0
							break
						} else {
							deltas[i] += *workLeft
							work -= *workLeft
							*workLeft = 0
						}
					}
					return deltas[0], deltas[1], deltas[2]
				}
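				// For example (illustrative numbers): with globalsScanWorkLeft = 100,
				// stackScanWorkLeft = 500, heapScanWorkLeft = 1000 and work = 300, doWork
				// returns (100, 200, 0): globals drain first, the remaining 200 bytes of
				// budget go to stacks, and the heap work waits for a later call.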
				var (
					gcDuration          int64
					assistTime          int64
					bytesAllocatedBlack int64
				)
				for heapScanWorkLeft+stackScanWorkLeft+globalsScanWorkLeft > 0 {
					// Simulate GC assist pacing.
					//
					// Note that this is an idealized view of the GC assist pacing
					// mechanism.
					//
					// From the assist ratio and the alloc and scan rates, we can idealize what
					// the GC CPU utilization looks like.
					//
					// We start with assistRatio = (bytes of scan work) / (bytes of runway) (by definition).
					//
					// Over revisePeriod, we can also calculate how many bytes are scanned and
					// allocated, given some GC CPU utilization u:
					//
					//     bytesScanned   = scanRate  * rateConv * nCores * u
					//     bytesAllocated = allocRate * rateConv * nCores * (1 - u)
					//
					// During revisePeriod, assistRatio is kept constant, and GC assists kick in to
					// maintain it. Specifically, they act to prevent too many bytes being allocated
					// compared to how many bytes are scanned. It directly defines the ratio of
					// bytesScanned to bytesAllocated over this period, hence:
					//
					//     assistRatio = bytesScanned / bytesAllocated
					//
					// From this, we can solve for utilization, because everything else has already
					// been determined:
					//
					//     assistRatio = (scanRate * rateConv * nCores * u) / (allocRate * rateConv * nCores * (1 - u))
					//     assistRatio = (scanRate * u) / (allocRate * (1 - u))
					//     assistRatio * allocRate * (1 - u) = scanRate * u
					//     assistRatio * allocRate - assistRatio * allocRate * u = scanRate * u
					//     assistRatio * allocRate = assistRatio * allocRate * u + scanRate * u
					//     assistRatio * allocRate = (assistRatio * allocRate + scanRate) * u
					//     u = (assistRatio * allocRate) / (assistRatio * allocRate + scanRate)
					//
					// Note that this may give a utilization that is _less_ than GCBackgroundUtilization,
					// which isn't possible in practice because of dedicated workers. Thus, this case
					// must be interpreted as GC assists not kicking in at all, and just round up. All
					// downstream values will then have this accounted for.
					assistRatio := c.AssistWorkPerByte()
					utilization := assistRatio * cycle.allocRate / (assistRatio*cycle.allocRate + cycle.scanRate)
					if utilization < GCBackgroundUtilization {
						utilization = GCBackgroundUtilization
					}
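					// Worked example (illustrative numbers, not taken from a real run): with
					// assistRatio = 0.5 and the "Steady" rates allocRate = 33, scanRate = 1024
					// (KiB/cpu-ms), u = 0.5*33 / (0.5*33 + 1024) ≈ 0.016, well below
					// GCBackgroundUtilization (0.25 in the current runtime), so the period is
					// rounded up and treated as having no assists at all.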
					// Knowing the utilization, calculate bytesScanned and bytesAllocated.
					bytesScanned := int64(cycle.scanRate * rateConv * float64(e.nCores) * utilization)
					bytesAllocated := int64(cycle.allocRate * rateConv * float64(e.nCores) * (1 - utilization))

					// Subtract work from our model.
					globalsScanned, stackScanned, heapScanned := doWork(bytesScanned)

					// doWork may not use all of bytesScanned.
					// In this case, the GC actually ends sometime in this period.
					// Let's figure out when, exactly, and adjust bytesAllocated too.
					actualElapsed := revisePeriod
					actualAllocated := bytesAllocated
					if actualScanned := globalsScanned + stackScanned + heapScanned; actualScanned < bytesScanned {
						// actualScanned = scanRate * rateConv * (t / revisePeriod) * nCores * u
						// => t = actualScanned * revisePeriod / (scanRate * rateConv * nCores * u)
						actualElapsed = time.Duration(float64(actualScanned) * float64(revisePeriod) / (cycle.scanRate * rateConv * float64(e.nCores) * utilization))
						actualAllocated = int64(cycle.allocRate * rateConv * float64(actualElapsed) / float64(revisePeriod) * float64(e.nCores) * (1 - utilization))
					}

					// Ask the pacer to revise.
					c.Revise(GCControllerReviseDelta{
						HeapLive:        actualAllocated,
						HeapScan:        int64(float64(actualAllocated) * cycle.scannableFrac),
						HeapScanWork:    heapScanned,
						StackScanWork:   stackScanned,
						GlobalsScanWork: globalsScanned,
					})

					// Accumulate variables.
					assistTime += int64(float64(actualElapsed) * float64(e.nCores) * (utilization - GCBackgroundUtilization))
					gcDuration += int64(actualElapsed)
					bytesAllocatedBlack += actualAllocated
				}

				// Put together the results, log them, and concatenate them.
				result := gcCycleResult{
					cycle:         i + 1,
					heapLive:      c.HeapMarked(),
					heapScannable: int64(float64(int64(c.HeapMarked())-bytesAllocatedBlackLast) * cycle.scannableFrac),
					heapTrigger:   c.Trigger(),
					heapPeak:      c.HeapLive(),
					heapGoal:      c.HeapGoal(),
					gcUtilization: float64(assistTime)/(float64(gcDuration)*float64(e.nCores)) + GCBackgroundUtilization,
				}
				t.Log("GC", result.String())
				results = append(results, result)

				// Run the checker for this test.
				e.check(t, results)

				c.EndCycle(uint64(nextHeapMarked+bytesAllocatedBlack), assistTime, gcDuration, e.nCores)
				bytesAllocatedBlackLast = bytesAllocatedBlack
			}
		})
	}
}
type gcExecTest struct {
	name string

	gcPercent    int
	globalsBytes uint64
	nCores       int

	allocRate     float64Stream // > 0, KiB / cpu-ms
	scanRate      float64Stream // > 0, KiB / cpu-ms
	growthRate    float64Stream // > 0
	scannableFrac float64Stream // Clamped to [0, 1]
	stackBytes    float64Stream // Multiple of 2048.
	length        int

	checker func(*testing.T, []gcCycleResult)
}

// minRate is an arbitrary minimum for allocRate, scanRate, and growthRate.
// These values just cannot be zero.
const minRate = 0.0001

func (e *gcExecTest) next() gcCycle {
	return gcCycle{
		allocRate:     e.allocRate.min(minRate)(),
		scanRate:      e.scanRate.min(minRate)(),
		growthRate:    e.growthRate.min(minRate)(),
		scannableFrac: e.scannableFrac.limit(0, 1)(),
		stackBytes:    uint64(e.stackBytes.quantize(2048).min(0)()),
	}
}
func (e *gcExecTest) check(t *testing.T, results []gcCycleResult) {
	t.Helper()

	// Do some basic general checks first.
	n := len(results)
	switch n {
	case 0:
		t.Fatal("no results passed to check")
		return
	case 1:
		if results[0].cycle != 1 {
			t.Error("first cycle has incorrect number")
		}
	default:
		if results[n-1].cycle != results[n-2].cycle+1 {
			t.Error("cycle numbers out of order")
		}
	}
	if u := results[n-1].gcUtilization; u < 0 || u > 1 {
		t.Fatal("GC utilization not within acceptable bounds")
	}
	if s := results[n-1].heapScannable; s < 0 {
		t.Fatal("heapScannable is negative")
	}
	if e.checker == nil {
		t.Fatal("test-specific checker is missing")
	}

	// Run the test-specific checker.
	e.checker(t, results)
}
type gcCycle struct {
	allocRate     float64
	scanRate      float64
	growthRate    float64
	scannableFrac float64
	stackBytes    uint64
}

type gcCycleResult struct {
	cycle int

	// These come directly from the pacer, so uint64.
	heapLive    uint64
	heapTrigger uint64
	heapGoal    uint64
	heapPeak    uint64

	// These are produced by the simulation, so int64 and
	// float64 are more appropriate, so that we can check for
	// bad states in the simulation.
	heapScannable int64
	gcUtilization float64
}

func (r *gcCycleResult) goalRatio() float64 {
	return float64(r.heapPeak) / float64(r.heapGoal)
}

func (r *gcCycleResult) String() string {
	return fmt.Sprintf("%d %2.1f%% %d->%d->%d (goal: %d)", r.cycle, r.gcUtilization*100, r.heapLive, r.heapTrigger, r.heapPeak, r.heapGoal)
}
func assertInEpsilon(t *testing.T, name string, a, b, epsilon float64) {
	t.Helper()
	assertInRange(t, name, a, b-epsilon, b+epsilon)
}

func assertInRange(t *testing.T, name string, a, min, max float64) {
	t.Helper()
	if a < min || a > max {
		t.Errorf("%s not in range (%f, %f): %f", name, min, max, a)
	}
}
// float64Stream is a function that generates an infinite stream of
// float64 values when called repeatedly.
type float64Stream func() float64

// constant returns a stream that generates the value c.
func constant(c float64) float64Stream {
	return func() float64 {
		return c
	}
}

// unit returns a stream that generates a single peak with
// amplitude amp, followed by zeroes.
//
// In another manner of speaking, this is the Kronecker delta.
func unit(amp float64) float64Stream {
	dropped := false
	return func() float64 {
		if dropped {
			return 0
		}
		dropped = true
		return amp
	}
}

// oscillate returns a stream that oscillates sinusoidally
// with the given amplitude, phase, and period.
func oscillate(amp, phase float64, period int) float64Stream {
	var cycle int
	return func() float64 {
		p := float64(cycle)/float64(period)*2*math.Pi + phase
		cycle++
		if cycle == period {
			cycle = 0
		}
		return math.Sin(p) * amp
	}
}

// ramp returns a stream that moves from zero to height
// over the course of length steps.
func ramp(height float64, length int) float64Stream {
	var cycle int
	return func() float64 {
		h := height * float64(cycle) / float64(length)
		if cycle < length {
			cycle++
		}
		return h
	}
}

// random returns a stream that generates random numbers
// between -amp and amp.
func random(amp float64, seed int64) float64Stream {
	r := rand.New(rand.NewSource(seed))
	return func() float64 {
		return ((r.Float64() - 0.5) * 2) * amp
	}
}

// delay returns a new stream which is a buffered version
// of f: it returns zero for cycles steps, followed by f.
func (f float64Stream) delay(cycles int) float64Stream {
	zeroes := 0
	return func() float64 {
		if zeroes < cycles {
			zeroes++
			return 0
		}
		return f()
	}
}

// scale returns a new stream that is f, but attenuated by a
// constant factor.
func (f float64Stream) scale(amt float64) float64Stream {
	return func() float64 {
		return f() * amt
	}
}

// offset returns a new stream that is f but offset by amt
// at each step.
func (f float64Stream) offset(amt float64) float64Stream {
	return func() float64 {
		old := f()
		return old + amt
	}
}

// sum returns a new stream that is the sum of all input streams
// at each step.
func (f float64Stream) sum(fs ...float64Stream) float64Stream {
	return func() float64 {
		sum := f()
		for _, s := range fs {
			sum += s()
		}
		return sum
	}
}

// quantize returns a new stream that rounds f to a multiple
// of mult at each step.
func (f float64Stream) quantize(mult float64) float64Stream {
	return func() float64 {
		r := f() / mult
		if r < 0 {
			return math.Ceil(r) * mult
		}
		return math.Floor(r) * mult
	}
}

// min returns a new stream that replaces all values produced
// by f lower than min with min.
func (f float64Stream) min(min float64) float64Stream {
	return func() float64 {
		return math.Max(min, f())
	}
}

// max returns a new stream that replaces all values produced
// by f higher than max with max.
func (f float64Stream) max(max float64) float64Stream {
	return func() float64 {
		return math.Min(max, f())
	}
}

// limit returns a new stream that replaces all values produced
// by f lower than min with min and higher than max with max.
func (f float64Stream) limit(min, max float64) float64Stream {
	return func() float64 {
		v := f()
		if v < min {
			v = min
		} else if v > max {
			v = max
		}
		return v
	}
}
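
// exampleGrowthStream is a hypothetical helper (a sketch added for illustration,
// not used by the tests above): the combinators compose by wrapping, so each call
// to the outer stream advances every underlying stream by one step. Evaluating
// the growth-rate stream used by the "Steady" test yields 2.0 on the first call,
// drops by 1.0/12 per call, and settles at 1.0 from the 13th call onward,
// modeling initial heap growth followed by a steady-state heap.
func exampleGrowthStream() []float64 {
	g := constant(2.0).sum(ramp(-1.0, 12))
	out := make([]float64, 15)
	for i := range out {
		out[i] = g() // 2.0, 2.0 - 1/12, ..., then 1.0 once the ramp completes.
	}
	return out
}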
func FuzzPIController(f *testing.F) {
	isNormal := func(x float64) bool {
		return !math.IsInf(x, 0) && !math.IsNaN(x)
	}
	isPositive := func(x float64) bool {
		return isNormal(x) && x > 0
	}
	// Seed with constants from controllers in the runtime.
	// It's not critical that we keep these in sync, they're just
	// reasonable seed inputs.
	f.Add(0.3375, 3.2e6, 1e9, 0.001, 1000.0, 0.01)
	f.Add(0.9, 4.0, 1000.0, -1000.0, 1000.0, 0.84)
	f.Fuzz(func(t *testing.T, kp, ti, tt, min, max, setPoint float64) {
		// Ignore uninteresting invalid parameters. These parameters
		// are constant, so in practice surprising values will be documented
		// or will be otherwise immediately visible.
		//
		// We just want to make sure that given a non-Inf, non-NaN input,
		// we always get a non-Inf, non-NaN output.
		if !isPositive(kp) || !isPositive(ti) || !isPositive(tt) {
			return
		}
		if !isNormal(min) || !isNormal(max) || min > max {
			return
		}
		// Use a random source, but make it deterministic.
		rs := rand.New(rand.NewSource(800))
		randFloat64 := func() float64 {
			return math.Float64frombits(rs.Uint64())
		}
		p := NewPIController(kp, ti, tt, min, max)
		state := float64(0)
		for i := 0; i < 100; i++ {
			input := randFloat64()
			// Ignore the "ok" parameter. We're just trying to break it.
			// state is intentionally completely uncorrelated with the input.
			var ok bool
			state, ok = p.Next(input, setPoint, 1.0)
			if !isNormal(state) {
				t.Fatalf("got NaN or Inf result from controller: %f %v", state, ok)
			}
		}
	})
}
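
// Usage note (an assumption about the surrounding toolchain, not stated in this
// file): the seed inputs registered with f.Add above also run as ordinary test
// cases under a plain "go test". Toolchains with native fuzzing support (the gc
// toolchain since Go 1.18) can explore additional inputs with
// "go test -fuzz=FuzzPIController"; whether libgo's test harness supports the
// -fuzz mode is toolchain-dependent.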