1 # Copyright 2014 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file.
# Special import necessary because filename contains dash characters.
# __import__ is used because `import bisect-perf-regression` is not valid
# Python syntax (a dash is not allowed in an identifier).
bisect_perf_module = __import__('bisect-perf-regression')
# Sample perf-test output used by the TryParseResultValuesFromOutput tests.
# The '%(value)s' placeholder lets each test substitute the one metric value
# whose parsing is under test while the surrounding noise lines stay fixed.
# NOTE(review): the closing triple-quote was lost when this chunk was
# mangled and is reconstructed here — confirm against version control that
# no further output lines followed.
RESULTS_OUTPUT = """RESULT write_operations: write_operations= 23089 count
RESULT read_bytes_gpu: read_bytes_gpu= 35201 kb
RESULT write_bytes_gpu: write_bytes_gpu= 542 kb
RESULT telemetry_page_measurement_results: num_failed= 0 count
RESULT telemetry_page_measurement_results: num_errored= 0 count
*RESULT Total: Total_ref= %(value)s
"""
class BisectPerfRegressionTest(unittest.TestCase):
  """Test case for top-level functions in the bisect-perf-regression module."""
# NOTE(review): the `def setUp(self):` and `def tearDown(self):` header lines
# (and any method bodies) were lost when this chunk was mangled; only the two
# docstrings below survive. Restore the full methods from version control.
25 """Sets up the test environment before each test method."""
29 """Cleans up the test environment after each test method."""
# NOTE(review): this test is too damaged to reconstruct safely — the body of
# the `deps_file_contents` fixture (variable names and dict structure), the
# arguments to BisectPerformanceMetrics(...), and the arguments passed to
# _ParseRevisionsFromDEPSFileManually(...) are missing from this chunk. The
# surviving fragments show the intent: build a bisect instance, parse a DEPS
# string manually, and compare the extracted svn/git revisions against
# `expected_vars_dict`. Restore the full method from version control.
32 def testParseDEPSStringManually(self
):
33 """Tests DEPS parsing."""
34 bisect_options
= bisect_perf_module
.BisectOptions()
35 bisect_instance
= bisect_perf_module
.BisectPerformanceMetrics(
38 deps_file_contents
= """
41 '@ac4a9f31fe2610bd146857bbd55d7a260003a888',
43 'https://chromium.googlesource.com/chromium/blink.git',
45 'https://chromium.googlesource.com',
47 '@e01ac0a267d1017288bc67fa3c366b10469d8a24',
49 '74697cf2064c0a2c0d7e1b1b28db439286766a05'
52 # Should only expect svn/git revisions to come through, and urls to be
54 expected_vars_dict
= {
55 'ffmpeg_hash': '@ac4a9f31fe2610bd146857bbd55d7a260003a888',
56 'webkit_rev': '@e01ac0a267d1017288bc67fa3c366b10469d8a24',
57 'angle_revision': '74697cf2064c0a2c0d7e1b1b28db439286766a05'
59 vars_dict
= bisect_instance
._ParseRevisionsFromDEPSFileManually
(
61 self
.assertEqual(vars_dict
, expected_vars_dict
)
63 def testCalculateTruncatedMeanRaisesError(self
):
64 """CalculateTrunctedMean raises an error when passed an empty list."""
65 with self
.assertRaises(TypeError):
66 bisect_perf_module
.CalculateTruncatedMean([], 0)
68 def testCalculateMeanSingleNum(self
):
69 """Tests the CalculateMean function with a single number."""
70 self
.assertEqual(3.0, bisect_perf_module
.CalculateMean([3]))
72 def testCalculateMeanShortList(self
):
73 """Tests the CalculateMean function with a short list."""
74 self
.assertEqual(0.5, bisect_perf_module
.CalculateMean([-3, 0, 1, 4]))
76 def testCalculateMeanCompareAlternateImplementation(self
):
77 """Tests CalculateMean by comparing against an alternate implementation."""
78 def AlternateMeanFunction(values
):
79 """Simple arithmetic mean function."""
80 return sum(values
) / float(len(values
))
81 test_values_lists
= [[1], [5, 6.5, 1.2, 3], [-3, 0, 1, 4],
82 [-3, -1, 0.12, 0.752, 3.33, 8, 16, 32, 439]]
83 for values
in test_values_lists
:
85 AlternateMeanFunction(values
),
86 bisect_perf_module
.CalculateMean(values
))
88 def testCalculateConfidence(self
):
89 """Tests the confidence calculation."""
90 bad_values
= [[0, 1], [1, 2]]
91 good_values
= [[6, 7], [7, 8]]
92 # Closest means are mean(1, 2) and mean(6, 7).
94 # Standard deviation of [n-1, n, n, n+1] is 0.8165.
95 stddev_sum
= 0.8165 + 0.8165
96 # Expected confidence is an int in the range [0, 100].
97 expected_confidence
= min(100, int(100 * distance
/ float(stddev_sum
)))
100 bisect_perf_module
.CalculateConfidence(bad_values
, good_values
))
102 def testCalculateConfidence0(self
):
103 """Tests the confidence calculation when it's expected to be 0."""
104 bad_values
= [[0, 1], [1, 2], [4, 5], [0, 2]]
105 good_values
= [[4, 5], [6, 7], [7, 8]]
106 # Both groups have value lists with means of 4.5, which means distance
107 # between groups is zero, and thus confidence is zero.
109 0, bisect_perf_module
.CalculateConfidence(bad_values
, good_values
))
111 def testCalculateConfidence100(self
):
112 """Tests the confidence calculation when it's expected to be 100."""
113 bad_values
= [[1, 1], [1, 1]]
114 good_values
= [[1.2, 1.2], [1.2, 1.2]]
115 # Standard deviation in both groups is zero, so confidence is 100.
117 100, bisect_perf_module
.CalculateConfidence(bad_values
, good_values
))
119 def testCalculateRelativeChange(self
):
120 """Tests the common cases for calculating relative change."""
121 # The change is relative to the first value, regardless of which is bigger.
122 self
.assertEqual(0.5, bisect_perf_module
.CalculateRelativeChange(1.0, 1.5))
123 self
.assertEqual(0.5, bisect_perf_module
.CalculateRelativeChange(2.0, 1.0))
125 def testCalculateRelativeChangeFromZero(self
):
126 """Tests what happens when relative change from zero is calculated."""
127 # If the first number is zero, then the result is not a number.
128 self
.assertEqual(0, bisect_perf_module
.CalculateRelativeChange(0, 0))
130 math
.isnan(bisect_perf_module
.CalculateRelativeChange(0, 1)))
132 math
.isnan(bisect_perf_module
.CalculateRelativeChange(0, -1)))
134 def testCalculateRelativeChangeWithNegatives(self
):
135 """Tests that relative change given is always positive."""
136 self
.assertEqual(3.0, bisect_perf_module
.CalculateRelativeChange(-1, 2))
137 self
.assertEqual(3.0, bisect_perf_module
.CalculateRelativeChange(1, -2))
138 self
.assertEqual(1.0, bisect_perf_module
.CalculateRelativeChange(-1, -2))
140 def testTryParseResultValuesFromOutputWithSingleValue(self
):
141 """Tests result pattern <*>RESULT <graph>: <trace>= <value>"""
142 bisect_options
= bisect_perf_module
.BisectOptions()
143 bisect_instance
= bisect_perf_module
.BisectPerformanceMetrics(
144 None, bisect_options
)
145 metrics
= ['Total', 'Total_ref']
147 [66.88], bisect_instance
.TryParseResultValuesFromOutput(
148 metrics
, RESULTS_OUTPUT
% {'value': '66.88 kb'}))
150 [66.88], bisect_instance
.TryParseResultValuesFromOutput(
151 metrics
, RESULTS_OUTPUT
% {'value': '66.88kb'}))
153 [66.88], bisect_instance
.TryParseResultValuesFromOutput(
154 metrics
, RESULTS_OUTPUT
% {'value': ' 66.88 '}))
156 [-66.88], bisect_instance
.TryParseResultValuesFromOutput(
157 metrics
, RESULTS_OUTPUT
% {'value': ' -66.88 kb'}))
159 [66], bisect_instance
.TryParseResultValuesFromOutput(
160 metrics
, RESULTS_OUTPUT
% {'value': '66 kb'}))
162 [.66], bisect_instance
.TryParseResultValuesFromOutput(
163 metrics
, RESULTS_OUTPUT
% {'value': '.66 kb'}))
165 [], bisect_instance
.TryParseResultValuesFromOutput(
166 metrics
, RESULTS_OUTPUT
% {'value': '. kb'}))
168 [], bisect_instance
.TryParseResultValuesFromOutput(
169 metrics
, RESULTS_OUTPUT
% {'value': 'aaa kb'}))
171 def testTryParseResultValuesFromOutputWithMulitValue(self
):
172 """Tests result pattern <*>RESULT <graph>: <trace>= [<value>,<value>, ..]"""
173 bisect_options
= bisect_perf_module
.BisectOptions()
174 bisect_instance
= bisect_perf_module
.BisectPerformanceMetrics(
175 None, bisect_options
)
176 metrics
= ['Total', 'Total_ref']
178 [66.88], bisect_instance
.TryParseResultValuesFromOutput(
179 metrics
, RESULTS_OUTPUT
% {'value': '[66.88] kb'}))
181 [66.88, 99.44], bisect_instance
.TryParseResultValuesFromOutput(
182 metrics
, RESULTS_OUTPUT
% {'value': '[66.88, 99.44]kb'}))
184 [66.88, 99.44], bisect_instance
.TryParseResultValuesFromOutput(
185 metrics
, RESULTS_OUTPUT
% {'value': '[ 66.88, 99.44 ]'}))
187 [-66.88, 99.44], bisect_instance
.TryParseResultValuesFromOutput(
188 metrics
, RESULTS_OUTPUT
% {'value': '[-66.88,99.44] kb'}))
190 [-66, 99], bisect_instance
.TryParseResultValuesFromOutput(
191 metrics
, RESULTS_OUTPUT
% {'value': '[-66,99] kb'}))
193 [-66, 99], bisect_instance
.TryParseResultValuesFromOutput(
194 metrics
, RESULTS_OUTPUT
% {'value': '[-66,99,] kb'}))
196 [.66, .99], bisect_instance
.TryParseResultValuesFromOutput(
197 metrics
, RESULTS_OUTPUT
% {'value': '[.66,.99] kb'}))
199 [], bisect_instance
.TryParseResultValuesFromOutput(
200 metrics
, RESULTS_OUTPUT
% {'value': '[] kb'}))
202 [], bisect_instance
.TryParseResultValuesFromOutput(
203 metrics
, RESULTS_OUTPUT
% {'value': '[-66,abc] kb'}))
205 def testTryParseResultValuesFromOutputWithMeanStd(self
):
206 """Tests result pattern <*>RESULT <graph>: <trace>= {<mean, std}"""
207 bisect_options
= bisect_perf_module
.BisectOptions()
208 bisect_instance
= bisect_perf_module
.BisectPerformanceMetrics(
209 None, bisect_options
)
210 metrics
= ['Total', 'Total_ref']
212 [33.22], bisect_instance
.TryParseResultValuesFromOutput(
213 metrics
, RESULTS_OUTPUT
% {'value': '{33.22, 3.6} kb'}))
215 [33.22], bisect_instance
.TryParseResultValuesFromOutput(
216 metrics
, RESULTS_OUTPUT
% {'value': '{33.22,3.6}kb'}))
218 [33.22], bisect_instance
.TryParseResultValuesFromOutput(
219 metrics
, RESULTS_OUTPUT
% {'value': '{33.22,3.6} kb'}))
221 [33.22], bisect_instance
.TryParseResultValuesFromOutput(
222 metrics
, RESULTS_OUTPUT
% {'value': '{ 33.22,3.6 }kb'}))
224 [-33.22], bisect_instance
.TryParseResultValuesFromOutput(
225 metrics
, RESULTS_OUTPUT
% {'value': '{-33.22,3.6}kb'}))
227 [22], bisect_instance
.TryParseResultValuesFromOutput(
228 metrics
, RESULTS_OUTPUT
% {'value': '{22,6}kb'}))
230 [.22], bisect_instance
.TryParseResultValuesFromOutput(
231 metrics
, RESULTS_OUTPUT
% {'value': '{.22,6}kb'}))
233 [], bisect_instance
.TryParseResultValuesFromOutput(
234 metrics
, RESULTS_OUTPUT
% {'value': '{.22,6, 44}kb'}))
236 [], bisect_instance
.TryParseResultValuesFromOutput(
237 metrics
, RESULTS_OUTPUT
% {'value': '{}kb'}))
239 [], bisect_instance
.TryParseResultValuesFromOutput(
240 metrics
, RESULTS_OUTPUT
% {'value': '{XYZ}kb'}))
# NOTE(review): the suite entry point is truncated here — the guarded body
# (conventionally `unittest.main()`) lies beyond this chunk. Restore from
# version control.
243 if __name__
== '__main__':