Gallery.app: Dispatch 'useraction' event when a user clicks the arrow buttons in...
[chromium-blink-merge.git] / tools / bisect-perf-regression.py
blob24b051834e3d7cc2c1db66eac9b833e37b474e6e
1 #!/usr/bin/env python
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
13 further bisect changes to those depots and attempt to narrow down the revision
14 range.
17 An example usage (using svn cl's):
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
28 An example usage (using git hashes):
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
36 """
38 import copy
39 import datetime
40 import errno
41 import hashlib
42 import math
43 import optparse
44 import os
45 import re
46 import shlex
47 import shutil
48 import StringIO
49 import subprocess
50 import sys
51 import time
52 import zipfile
54 sys.path.append(os.path.join(os.path.dirname(__file__), 'telemetry'))
56 import bisect_utils
57 import post_perf_builder_job as bisect_builder
58 from telemetry.page import cloud_storage
60 # The additional repositories that might need to be bisected.
61 # If the repository has any dependant repositories (such as skia/src needs
62 # skia/include and skia/gyp to be updated), specify them in the 'depends'
63 # so that they're synced appropriately.
64 # Format is:
65 # src: path to the working directory.
66 # recurse: True if this repositry will get bisected.
67 # depends: A list of other repositories that are actually part of the same
68 # repository in svn.
69 # svn: Needed for git workflow to resolve hashes to svn revisions.
70 # from: Parent depot that must be bisected before this is bisected.
71 # deps_var: Key name in vars varible in DEPS file that has revision information.
DEPOT_DEPS_NAME = {
  'chromium' : {
    "src" : "src",
    "recurse" : True,
    "depends" : None,
    "from" : ['cros', 'android-chrome'],
    'viewvc': 'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
    'deps_var': 'chromium_rev'
  },
  'webkit' : {
    "src" : "src/third_party/WebKit",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'http://src.chromium.org/viewvc/blink?view=revision&revision=',
    'deps_var': 'webkit_revision'
  },
  'angle' : {
    "src" : "src/third_party/angle",
    "src_old" : "src/third_party/angle_dx11",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    "platform": 'nt',
    'deps_var': 'angle_revision'
  },
  'v8' : {
    "src" : "src/v8",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    "custom_deps": bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'v8_bleeding_edge' : {
    "src" : "src/v8_bleeding_edge",
    "recurse" : True,
    "depends" : None,
    "svn": "https://v8.googlecode.com/svn/branches/bleeding_edge",
    "from" : ['v8'],
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'skia/src' : {
    "src" : "src/third_party/skia/src",
    "recurse" : True,
    "svn" : "http://skia.googlecode.com/svn/trunk/src",
    "depends" : ['skia/include', 'skia/gyp'],
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'skia_revision'
  },
  'skia/include' : {
    "src" : "src/third_party/skia/include",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/include",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  },
  'skia/gyp' : {
    "src" : "src/third_party/skia/gyp",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/gyp",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  }
}

DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
# Paths into the ChromeOS checkout used when bisecting with the cros target.
CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
CROS_VERSION_PATTERN = 'new version number from %s'
CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
                                  'testing_rsa')
CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
                                    'mod_for_test_scripts', 'ssh_keys',
                                    'testing_rsa')

# Outcome codes for a single build attempt.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2

# Maximum time in seconds to wait after posting build request to tryserver.
# TODO: Change these values based on the actual time taken by buildbots on
# the tryserver.
MAX_MAC_BUILD_TIME = 14400
MAX_WIN_BUILD_TIME = 14400
MAX_LINUX_BUILD_TIME = 14400

# Patch template to add a new file, DEPS.sha under src folder.
# This file contains SHA1 value of the DEPS changes made while bisecting
# dependency repositories. This patch send along with DEPS patch to tryserver.
# When a build requested is posted with a patch, bisect builders on tryserver,
# once build is produced, it reads SHA value from this file and appends it
# to build archive filename.
DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha
new file mode 100644
--- /dev/null
+++ src/DEPS.sha
@@ -0,0 +1 @@
+%(deps_sha)s
"""

# The possible values of the --bisect_mode flag, which determines what to
# use when classifying a revision as "good" or "bad".
BISECT_MODE_MEAN = 'mean'
BISECT_MODE_STD_DEV = 'std_dev'
BISECT_MODE_RETURN_CODE = 'return_code'
def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables."""
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  # Merge the new entries on top of the existing table (new keys win).
  merged_depots = dict(DEPOT_DEPS_NAME)
  merged_depots.update(depot_info)
  DEPOT_DEPS_NAME = merged_depots
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
def CalculateTruncatedMean(data_set, truncate_percent):
  """Calculates the truncated mean of a set of values.

  Note that this isn't just the mean of the set of values with the highest
  and lowest values discarded; the non-discarded values are also weighted
  differently depending how many values are discarded.

  Args:
    data_set: Non-empty list of values.
    truncate_percent: The % from the upper and lower portions of the data set
        to discard, expressed as a value in [0, 1].

  Returns:
    The truncated mean as a float.

  Raises:
    TypeError: The data set was empty after discarding values.
  """
  if len(data_set) > 2:
    data_set = sorted(data_set)

    discard_num_float = len(data_set) * truncate_percent
    discard_num_int = int(math.floor(discard_num_float))
    kept_weight = len(data_set) - discard_num_float * 2

    data_set = data_set[discard_num_int:len(data_set)-discard_num_int]

    weight_left = 1.0 - (discard_num_float - discard_num_int)

    if weight_left < 1:
      # If the % to discard leaves a fractional portion, need to weight those
      # values.
      unweighted_vals = data_set[1:len(data_set)-1]
      weighted_vals = [data_set[0], data_set[len(data_set)-1]]
      weighted_vals = [w * weight_left for w in weighted_vals]
      data_set = weighted_vals + unweighted_vals
  else:
    # Too few values to truncate anything; plain arithmetic mean.
    kept_weight = len(data_set)

  truncated_mean = reduce(lambda x, y: float(x) + float(y),
                          data_set) / kept_weight

  return truncated_mean
def CalculateMean(values):
  """Calculates the arithmetic mean of a list of values."""
  # Truncating 0% is exactly the plain arithmetic mean.
  return CalculateTruncatedMean(values, 0.0)
def CalculateConfidence(good_results_lists, bad_results_lists):
  """Calculates a confidence percentage.

  This is calculated based on how distinct the "good" and "bad" values are,
  and how noisy the results are. More precisely, the confidence is the quotient
  of the difference between the closest values across the good and bad groups
  and the sum of the standard deviations of the good and bad groups.

  TODO(qyearsley): Replace this confidence function with a function that
  uses a Student's t-test. The confidence would be (1 - p-value), where
  p-value is the probability of obtaining the given a set of good and bad
  values just by chance.

  Args:
    good_results_lists: A list of lists of "good" result numbers.
    bad_results_lists: A list of lists of "bad" result numbers.

  Returns:
    A number in the range [0, 100].
  """
  # Get the distance between the two groups.
  # list() so each mapped sequence can be consumed twice (min then max);
  # identical on Python 2 and stays correct under Python 3's lazy map.
  means_good = list(map(CalculateMean, good_results_lists))
  means_bad = list(map(CalculateMean, bad_results_lists))
  bounds_good = (min(means_good), max(means_good))
  bounds_bad = (min(means_bad), max(means_bad))
  dist_between_groups = min(
      math.fabs(bounds_bad[1] - bounds_good[0]),
      math.fabs(bounds_bad[0] - bounds_good[1]))

  # Get the sum of the standard deviations of the two groups.
  good_results_flattened = sum(good_results_lists, [])
  bad_results_flattened = sum(bad_results_lists, [])
  stddev_good = CalculateStandardDeviation(good_results_flattened)
  stddev_bad = CalculateStandardDeviation(bad_results_flattened)
  stddev_sum = stddev_good + stddev_bad

  # Floor the divisor so perfectly noiseless samples don't divide by zero.
  confidence = dist_between_groups / (max(0.0001, stddev_sum))
  confidence = int(min(1.0, max(confidence, 0.0)) * 100.0)
  return confidence
def CalculateStandardDeviation(values):
  """Calculates the sample standard deviation of the given list of values."""
  if len(values) == 1:
    # A single sample has no spread (and (n - 1) would be zero below).
    return 0.0

  mean = CalculateMean(values)
  sum_of_squares = 0.0
  for value in values:
    delta = float(value) - mean
    sum_of_squares += delta * delta
  # Sample (Bessel-corrected) variance: divide by n - 1, not n.
  variance = sum_of_squares / (len(values) - 1)
  return math.sqrt(variance)
def CalculateRelativeChange(before, after):
  """Returns the relative change of before and after, relative to before.

  There are several different ways to define relative difference between
  two numbers; sometimes it is defined as relative to the smaller number,
  or to the mean of the two numbers. This version returns the difference
  relative to the first of the two numbers.

  Args:
    before: A number representing an earlier value.
    after: Another number, representing a later value.

  Returns:
    A non-negative floating point number; 0.1 represents a 10% change.
  """
  if before == after:
    return 0.0
  if before == 0:
    # 0 before means the change is undefined.
    return float('nan')
  difference = after - before
  # Convert to float so integer inputs don't truncate to zero under
  # Python 2 integer division.
  return math.fabs(float(difference) / before)
def CalculatePooledStandardError(work_sets):
  """Calculates the pooled standard error across several samples.

  Args:
    work_sets: A list of lists of numeric values, one inner list per sample.

  Returns:
    The pooled standard error as a float; 0.0 when every sample has fewer
    than two values (no degrees of freedom).
  """
  numerator = 0.0
  denominator1 = 0.0
  denominator2 = 0.0

  for current_set in work_sets:
    std_dev = CalculateStandardDeviation(current_set)
    # Weight each sample's variance by its degrees of freedom (n - 1).
    numerator += (len(current_set) - 1) * std_dev ** 2
    denominator1 += len(current_set) - 1
    denominator2 += 1.0 / len(current_set)

  if denominator1:
    return math.sqrt(numerator / denominator1) * math.sqrt(denominator2)
  return 0.0
def CalculateStandardError(values):
  """Calculates the standard error of a list of values."""
  if len(values) <= 1:
    # Not enough samples for a meaningful error estimate.
    return 0.0
  return CalculateStandardDeviation(values) / math.sqrt(len(values))
def IsStringFloat(string_to_check):
  """Checks whether or not the given string can be converted to a floating
  point number.

  Args:
    string_to_check: Input string to check if it can be converted to a float.

  Returns:
    True if the string can be converted to a float.
  """
  try:
    float(string_to_check)

    return True
  except ValueError:
    return False
def IsStringInt(string_to_check):
  """Checks whether or not the given string can be converted to a integer.

  Args:
    string_to_check: Input string to check if it can be converted to an int.

  Returns:
    True if the string can be converted to an int.
  """
  try:
    int(string_to_check)

    return True
  except ValueError:
    return False
def IsWindowsHost():
  """Checks whether or not the script is running on Windows.

  Returns:
    True if running on Windows.
  """
  return sys.platform == 'cygwin' or sys.platform.startswith('win')
def Is64BitWindows():
  """Returns whether or not Windows is a 64-bit version.

  Returns:
    True if Windows is 64-bit, False if 32-bit.
  """
  # Under WoW64 (32-bit process on 64-bit Windows), PROCESSOR_ARCHITECTURE
  # reports the emulated architecture; PROCESSOR_ARCHITEW6432 holds the real
  # one. Prefer the latter, and fall back to an empty string so the check
  # degrades to False instead of raising KeyError off Windows.
  platform = os.environ.get(
      'PROCESSOR_ARCHITEW6432',
      os.environ.get('PROCESSOR_ARCHITECTURE', ''))

  # NOTE(review): 'I64' is kept from the original code; Itanium systems
  # usually report 'IA64' -- confirm before relying on Itanium detection.
  return platform in ['AMD64', 'I64']
def IsLinuxHost():
  """Checks whether or not the script is running on Linux.

  Returns:
    True if running on Linux.
  """
  return sys.platform.startswith('linux')
def IsMacHost():
  """Checks whether or not the script is running on Mac.

  Returns:
    True if running on Mac.
  """
  return sys.platform.startswith('darwin')
def GetSHA1HexDigest(contents):
  """Returns secured hash containing hexadecimal for the given contents."""
  sha1_hash = hashlib.sha1(contents)
  return sha1_hash.hexdigest()
def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None):
  """Gets the archive file name for the given revision."""
  def PlatformName():
    """Return a string to be used in paths for the platform."""
    if IsWindowsHost():
      # Build archives for both ia32 and x64 are stored with the 'win32'
      # suffix (chromium_utils.PlatformName()), so no arch branching is
      # needed here.
      return 'win32'
    if IsLinuxHost():
      # Android builds too are archived with full-build-linux* prefix.
      return 'linux'
    if IsMacHost():
      return 'mac'
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)

  base_name = 'full-build-%s' % PlatformName()
  if not build_revision:
    return base_name
  if patch_sha:
    # Patched builds are disambiguated by appending the patch's SHA.
    build_revision = '%s_%s' % (build_revision, patch_sha)
  return '%s_%s.zip' % (base_name, build_revision)
def GetRemoteBuildPath(build_revision, target_platform='chromium',
                       target_arch='ia32', patch_sha=None):
  """Compute the url to download the build from."""
  def GetGSRootFolderName(target_platform):
    """Gets Google Cloud Storage root folder names"""
    if IsWindowsHost():
      if Is64BitWindows() and target_arch == 'x64':
        return 'Win x64 Builder'
      return 'Win Builder'
    if IsLinuxHost():
      if target_platform == 'android':
        return 'android_perf_rel'
      return 'Linux Builder'
    if IsMacHost():
      return 'Mac Builder'
    raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

  # The remote path is "<builder folder>/<archive file name>".
  zip_file_name = GetZipFileName(build_revision, target_arch, patch_sha)
  root_folder = GetGSRootFolderName(target_platform)
  return '%s/%s' % (root_folder, zip_file_name)
def FetchFromCloudStorage(bucket_name, source_path, destination_path):
  """Fetches file(s) from the Google Cloud Storage.

  Args:
    bucket_name: Google Storage bucket name.
    source_path: Source file path.
    destination_path: Destination file path.

  Returns:
    Downloaded file path if exists, otherwise None.
  """
  target_file = os.path.join(destination_path, os.path.basename(source_path))
  try:
    if cloud_storage.Exists(bucket_name, source_path):
      print 'Fetching file from gs//%s/%s ...' % (bucket_name, source_path)
      cloud_storage.Get(bucket_name, source_path, destination_path)
      # Only report success if the file actually landed on disk.
      if os.path.exists(target_file):
        return target_file
    else:
      print ('File gs://%s/%s not found in cloud storage.' % (
          bucket_name, source_path))
  except Exception as e:
    # Best-effort fetch: any failure (network, permissions, interrupted
    # transfer) is reported and treated as "not available".
    print 'Something went wrong while fetching file from cloud: %s' % e
    if os.path.exists(target_file):
      # Remove a possibly truncated partial download.
      os.remove(target_file)
  return None
# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
  """Creates an entire path, if it doesn't already exist.

  Args:
    *path: Path components, joined with os.path.join.

  Returns:
    True if the directory exists (or was created); False on any other
    creation error.
  """
  file_path = os.path.join(*path)
  try:
    os.makedirs(file_path)
  except OSError as e:  # 'as' form works on Python 2.6+ and Python 3.
    # An already-existing directory is not an error.
    if e.errno != errno.EEXIST:
      return False
  return True
# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def ExtractZip(filename, output_dir, verbose=True):
  """ Extract the zip archive in the output directory."""
  MaybeMakeDirectory(output_dir)

  # On Linux and Mac, we use the unzip command as it will
  # handle links and file bits (executable), which is much
  # easier then trying to do that with ZipInfo options.
  #
  # The Mac Version of unzip unfortunately does not support Zip64, whereas
  # the python module does, so we have to fallback to the python zip module
  # on Mac if the filesize is greater than 4GB.
  #
  # On Windows, try to use 7z if it is installed, otherwise fall back to python
  # zip module and pray we don't have files larger than 512MB to unzip.
  unzip_cmd = None
  if ((IsMacHost() and os.path.getsize(filename) < 4 * 1024 * 1024 * 1024)
      or IsLinuxHost()):
    unzip_cmd = ['unzip', '-o']
  elif IsWindowsHost() and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe'):
    unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']

  if unzip_cmd:
    # Make sure path is absolute before changing directories.
    filepath = os.path.abspath(filename)
    saved_dir = os.getcwd()
    # The external tools extract into the current directory, so temporarily
    # chdir into the destination.
    os.chdir(output_dir)
    command = unzip_cmd + [filepath]
    result = RunProcess(command)
    os.chdir(saved_dir)
    if result:
      raise IOError('unzip failed: %s => %s' % (str(command), result))
  else:
    assert IsWindowsHost() or IsMacHost()
    zf = zipfile.ZipFile(filename)
    for name in zf.namelist():
      if verbose:
        print 'Extracting %s' % name
      zf.extract(name, output_dir)
      if IsMacHost():
        # Restore permission bits.
        os.chmod(os.path.join(output_dir, name),
                 zf.getinfo(name).external_attr >> 16L)
def RunProcess(command):
  """Runs an arbitrary command.

  If output from the call is needed, use RunProcessAndRetrieveOutput instead.

  Args:
    command: A list containing the command and args to execute.

  Returns:
    The return code of the call.
  """
  # On Windows, use shell=True to get PATH interpretation.
  shell = IsWindowsHost()
  return subprocess.call(command, shell=shell)
def RunProcessAndRetrieveOutput(command, cwd=None):
  """Runs an arbitrary command, returning its output and return code.

  Since output is collected via communicate(), there will be no output until
  the call terminates. If you need output while the program runs (ie. so
  that the buildbot doesn't terminate the script), consider RunProcess().

  Args:
    command: A list containing the command and args to execute.
    cwd: A directory to change to while running the command. The command can be
        relative to this directory. If this is None, the command will be run in
        the current directory.

  Returns:
    A tuple of the output and return code.
  """
  if cwd:
    original_cwd = os.getcwd()
    os.chdir(cwd)

  try:
    # On Windows, use shell=True to get PATH interpretation.
    shell = IsWindowsHost()
    proc = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE)
    (output, _) = proc.communicate()
  finally:
    # Restore the working directory even if Popen/communicate raises, so a
    # failure here doesn't poison every later relative-path operation.
    if cwd:
      os.chdir(original_cwd)

  return (output, proc.returncode)
def RunGit(command, cwd=None):
  """Run a git subcommand, returning its output and return code.

  Args:
    command: A list containing the args to git.
    cwd: A directory to change to while running the git command (optional).

  Returns:
    A tuple of the output and return code.
  """
  command = ['git'] + command

  return RunProcessAndRetrieveOutput(command, cwd=cwd)
def CheckRunGit(command, cwd=None):
  """Run a git subcommand, returning its output and return code. Asserts if
  the return code of the call is non-zero.

  Args:
    command: A list containing the args to git.

  Returns:
    A tuple of the output and return code.
  """
  (output, return_code) = RunGit(command, cwd=cwd)

  assert not return_code, 'An error occurred while running'\
                          ' "git %s"' % ' '.join(command)
  return output
def SetBuildSystemDefault(build_system, use_goma):
  """Sets up any environment variables needed to build with the specified build
  system.

  Args:
    build_system: A string specifying build system. Currently only 'ninja' or
        'make' are supported.
    use_goma: If True, appends use_goma=1 to GYP_DEFINES.

  Raises:
    RuntimeError: The given build system is not supported.
  """
  if build_system == 'ninja':
    gyp_var = os.getenv('GYP_GENERATORS')

    if not gyp_var or 'ninja' not in gyp_var:
      if gyp_var:
        os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
      else:
        os.environ['GYP_GENERATORS'] = 'ninja'

      if IsWindowsHost():
        os.environ['GYP_DEFINES'] = 'component=shared_library '\
            'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
            'chromium_win_pch=0'
  elif build_system == 'make':
    os.environ['GYP_GENERATORS'] = 'make'
  else:
    raise RuntimeError('%s build not supported.' % build_system)

  if use_goma:
    os.environ['GYP_DEFINES'] = '%s %s' % (os.getenv('GYP_DEFINES', ''),
                                           'use_goma=1')
def BuildWithMake(threads, targets, build_type='Release'):
  """Runs a make build of |targets|; returns True on success."""
  cmd = ['make', 'BUILDTYPE=%s' % build_type]
  if threads:
    cmd.append('-j%d' % threads)
  cmd.extend(targets)
  # A zero exit status means the build succeeded.
  return RunProcess(cmd) == 0
def BuildWithNinja(threads, targets, build_type='Release'):
  """Runs a ninja build of |targets|; returns True on success."""
  cmd = ['ninja', '-C', os.path.join('out', build_type)]
  if threads:
    cmd.append('-j%d' % threads)
  cmd.extend(targets)
  # A zero exit status means the build succeeded.
  return RunProcess(cmd) == 0
def BuildWithVisualStudio(targets, build_type='Release'):
  """Builds |targets| with devenv.com; returns True on success."""
  # devenv.com lives relative to the VS common-tools directory.
  path_to_devenv = os.path.abspath(
      os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
  path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
  cmd = [path_to_devenv, '/build', build_type, path_to_sln]
  for target in targets:
    cmd.extend(['/Project', target])
  # A zero exit status means the build succeeded.
  return RunProcess(cmd) == 0
def WriteStringToFile(text, file_name):
  """Writes text to the named file, replacing any existing contents.

  Args:
    text: The content to write (bytes when run under Python 3, since the
        file is opened in binary mode).
    file_name: Path of the file to write.

  Raises:
    RuntimeError: The file could not be written.
  """
  try:
    with open(file_name, "wb") as f:
      f.write(text)
  except IOError as e:
    # Include the underlying cause instead of silently discarding it.
    raise RuntimeError('Error writing to file [%s]: %s' % (file_name, e))
def ReadStringFromFile(file_name):
  """Returns the entire contents of the named file as a string.

  Args:
    file_name: Path of the file to read.

  Raises:
    RuntimeError: The file could not be read.
  """
  try:
    with open(file_name) as f:
      return f.read()
  except IOError as e:
    # Include the underlying cause instead of silently discarding it.
    raise RuntimeError('Error reading file [%s]: %s' % (file_name, e))
def ChangeBackslashToSlashInPatch(diff_text):
  """Formats file paths in the given text to unix-style paths."""
  if not diff_text:
    return None

  def _FixLine(line):
    # Only the '--- '/'+++ ' file-header lines carry paths; the diff body is
    # left untouched so literal backslashes in code aren't mangled.
    if line.startswith('--- ') or line.startswith('+++ '):
      return line.replace('\\', '/')
    return line

  return '\n'.join(_FixLine(line) for line in diff_text.split('\n'))
class Builder(object):
  """Builder is used by the bisect script to build relevant targets and deploy.
  """
  def __init__(self, opts):
    """Performs setup for building with target build system.

    Args:
      opts: Options parsed from command line.
    """
    if IsWindowsHost():
      if not opts.build_preference:
        opts.build_preference = 'msvs'

      if opts.build_preference == 'msvs':
        if not os.getenv('VS100COMNTOOLS'):
          raise RuntimeError(
              'Path to visual studio could not be determined.')
      else:
        SetBuildSystemDefault(opts.build_preference, opts.use_goma)
    else:
      if not opts.build_preference:
        # Infer the build system from the existing GYP configuration.
        if 'ninja' in os.getenv('GYP_GENERATORS'):
          opts.build_preference = 'ninja'
        else:
          opts.build_preference = 'make'

      SetBuildSystemDefault(opts.build_preference, opts.use_goma)

    if not bisect_utils.SetupPlatformBuildEnvironment(opts):
      raise RuntimeError('Failed to set platform environment.')

  @staticmethod
  def FromOpts(opts):
    """Returns the Builder subclass matching opts.target_platform."""
    builder = None
    if opts.target_platform == 'cros':
      builder = CrosBuilder(opts)
    elif opts.target_platform == 'android':
      builder = AndroidBuilder(opts)
    elif opts.target_platform == 'android-chrome':
      builder = AndroidChromeBuilder(opts)
    else:
      builder = DesktopBuilder(opts)
    return builder

  def Build(self, depot, opts):
    # Subclasses implement the actual build.
    raise NotImplementedError()

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    """Returns the path to the build directory, relative to the checkout root.

    Assumes that the current working directory is the checkout root.
    """
    src_dir = src_dir or 'src'
    if opts.build_preference == 'ninja' or IsLinuxHost():
      return os.path.join(src_dir, 'out')
    if IsMacHost():
      return os.path.join(src_dir, 'xcodebuild')
    if IsWindowsHost():
      return os.path.join(src_dir, 'build')
    raise NotImplementedError('Unexpected platform %s' % sys.platform)
class DesktopBuilder(Builder):
  """DesktopBuilder is used to build Chromium on linux/mac/windows."""
  def __init__(self, opts):
    super(DesktopBuilder, self).__init__(opts)

  def Build(self, depot, opts):
    """Builds chromium_builder_perf target using options passed into
    the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    targets = ['chromium_builder_perf']

    threads = None
    if opts.use_goma:
      # Goma distributes compilation, so high parallelism pays off.
      threads = 64

    build_success = False
    if opts.build_preference == 'make':
      build_success = BuildWithMake(threads, targets, opts.target_build_type)
    elif opts.build_preference == 'ninja':
      build_success = BuildWithNinja(threads, targets, opts.target_build_type)
    elif opts.build_preference == 'msvs':
      assert IsWindowsHost(), 'msvs is only supported on Windows.'
      build_success = BuildWithVisualStudio(targets, opts.target_build_type)
    else:
      assert False, 'No build system defined.'
    return build_success
class AndroidBuilder(Builder):
  """AndroidBuilder is used to build on android."""
  def __init__(self, opts):
    super(AndroidBuilder, self).__init__(opts)

  def _GetTargets(self):
    # Targets needed to run the android perf tests.
    return ['chrome_shell_apk', 'cc_perftests_apk', 'android_tools']

  def Build(self, depot, opts):
    """Builds the android content shell and other necessary tools using options
    passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    threads = None
    if opts.use_goma:
      # Goma distributes compilation, so high parallelism pays off.
      threads = 64

    build_success = False
    if opts.build_preference == 'ninja':
      build_success = BuildWithNinja(
          threads, self._GetTargets(), opts.target_build_type)
    else:
      assert False, 'No build system defined.'

    return build_success
class AndroidChromeBuilder(AndroidBuilder):
  """AndroidChromeBuilder is used to build the "android-chrome" target.

  Extends AndroidBuilder by additionally building the chrome apk.
  """
  def __init__(self, opts):
    super(AndroidChromeBuilder, self).__init__(opts)

  def _GetTargets(self):
    # Everything the base android build needs, plus the chrome apk.
    return AndroidBuilder._GetTargets(self) + ['chrome_apk']
class CrosBuilder(Builder):
  """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
  target platform."""
  def __init__(self, opts):
    super(CrosBuilder, self).__init__(opts)

  def ImageToTarget(self, opts):
    """Installs latest image to target specified by opts.cros_remote_ip.

    Args:
      opts: Program options containing cros_board and cros_remote_ip.

    Returns:
      True if successful.
    """
    try:
      # Keys will most likely be set to 0640 after wiping the chroot.
      os.chmod(CROS_SCRIPT_KEY_PATH, 0600)
      os.chmod(CROS_TEST_KEY_PATH, 0600)
      cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
             '--remote=%s' % opts.cros_remote_ip,
             '--board=%s' % opts.cros_board, '--test', '--verbose']

      return_code = RunProcess(cmd)
      return not return_code
    except OSError, e:
      # chmod or process launch failed; treat as imaging failure.
      return False

  def BuildPackages(self, opts, depot):
    """Builds packages for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    if depot != 'cros':
      # When bisecting chromium, point the SDK at the local chrome checkout.
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=%s' % opts.target_build_type, './build_packages',
            '--board=%s' % opts.cros_board]
    return_code = RunProcess(cmd)

    return not return_code

  def BuildImage(self, opts, depot):
    """Builds test image for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    if depot != 'cros':
      # When bisecting chromium, point the SDK at the local chrome checkout.
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=%s' % opts.target_build_type, '--', './build_image',
            '--board=%s' % opts.cros_board, 'test']

    return_code = RunProcess(cmd)

    return not return_code

  def Build(self, depot, opts):
    """Builds targets using options passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    # Build packages, then the test image, then push it to the device;
    # stop at the first failing stage.
    if self.BuildPackages(opts, depot):
      if self.BuildImage(opts, depot):
        return self.ImageToTarget(opts)
    return False
class SourceControl(object):
  """SourceControl is an abstraction over the underlying source control
  system used for chromium. For now only git is supported, but in the
  future, the svn workflow could be added as well."""
  def __init__(self):
    super(SourceControl, self).__init__()

  def SyncToRevisionWithGClient(self, revision):
    """Uses gclient to sync to the specified revision.

    ie. gclient sync --revision <revision>

    Args:
      revision: The git SHA1 or svn CL (depending on workflow).

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunGClient(['sync', '--verbose', '--reset', '--force',
        '--delete_unversioned_trees', '--nohooks', '--revision', revision])

  def SyncToRevisionWithRepo(self, timestamp):
    """Uses repo to sync all the underlying git depots to the specified
    time.

    Args:
      timestamp: The unix timestamp to sync to.

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunRepoSyncAtTimestamp(timestamp)
1016 class GitSourceControl(SourceControl):
1017 """GitSourceControl is used to query the underlying source control. """
  def __init__(self, opts):
    super(GitSourceControl, self).__init__()
    # Parsed command-line options, kept for later source-control queries.
    self.opts = opts
  def IsGit(self):
    # This implementation is always git-backed.
    return True
  def GetRevisionList(self, revision_range_end, revision_range_start, cwd=None):
    """Retrieves a list of revisions between |revision_range_start| and
    |revision_range_end|.

    Args:
      revision_range_end: The SHA1 for the end of the range.
      revision_range_start: The SHA1 for the beginning of the range.

    Returns:
      A list of the revisions between |revision_range_start| and
      |revision_range_end| (inclusive).
    """
    revision_range = '%s..%s' % (revision_range_start, revision_range_end)
    cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
    log_output = CheckRunGit(cmd, cwd=cwd)

    revision_hash_list = log_output.split()
    # git log's A..B range excludes A itself; append it to make the list
    # inclusive of the start revision.
    revision_hash_list.append(revision_range_start)

    return revision_hash_list
1046 def SyncToRevision(self, revision, sync_client=None):
1047 """Syncs to the specified revision.
1049 Args:
1050 revision: The revision to sync to.
1051 use_gclient: Specifies whether or not we should sync using gclient or
1052 just use source control directly.
1054 Returns:
1055 True if successful.
1058 if not sync_client:
1059 results = RunGit(['checkout', revision])[1]
1060 elif sync_client == 'gclient':
1061 results = self.SyncToRevisionWithGClient(revision)
1062 elif sync_client == 'repo':
1063 results = self.SyncToRevisionWithRepo(revision)
1065 return not results
1067 def ResolveToRevision(self, revision_to_check, depot, search, cwd=None):
1068 """If an SVN revision is supplied, try to resolve it to a git SHA1.
1070 Args:
1071 revision_to_check: The user supplied revision string that may need to be
1072 resolved to a git SHA1.
1073 depot: The depot the revision_to_check is from.
1074 search: The number of changelists to try if the first fails to resolve
1075 to a git hash. If the value is negative, the function will search
1076 backwards chronologically, otherwise it will search forward.
1078 Returns:
1079 A string containing a git SHA1 hash, otherwise None.
1081 # Android-chrome is git only, so no need to resolve this to anything else.
1082 if depot == 'android-chrome':
1083 return revision_to_check
1085 if depot != 'cros':
1086 if not IsStringInt(revision_to_check):
1087 return revision_to_check
1089 depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'
1091 if depot != 'chromium':
1092 depot_svn = DEPOT_DEPS_NAME[depot]['svn']
1094 svn_revision = int(revision_to_check)
1095 git_revision = None
1097 if search > 0:
1098 search_range = xrange(svn_revision, svn_revision + search, 1)
1099 else:
1100 search_range = xrange(svn_revision, svn_revision + search, -1)
1102 for i in search_range:
1103 svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
1104 cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
1105 'origin/master']
1107 (log_output, return_code) = RunGit(cmd, cwd=cwd)
1109 assert not return_code, 'An error occurred while running'\
1110 ' "git %s"' % ' '.join(cmd)
1112 if not return_code:
1113 log_output = log_output.strip()
1115 if log_output:
1116 git_revision = log_output
1118 break
1120 return git_revision
1121 else:
1122 if IsStringInt(revision_to_check):
1123 return int(revision_to_check)
1124 else:
1125 cwd = os.getcwd()
1126 os.chdir(os.path.join(os.getcwd(), 'src', 'third_party',
1127 'chromiumos-overlay'))
1128 pattern = CROS_VERSION_PATTERN % revision_to_check
1129 cmd = ['log', '--format=%ct', '-1', '--grep', pattern]
1131 git_revision = None
1133 log_output = CheckRunGit(cmd, cwd=cwd)
1134 if log_output:
1135 git_revision = log_output
1136 git_revision = int(log_output.strip())
1137 os.chdir(cwd)
1139 return git_revision
1141 def IsInProperBranch(self):
1142 """Confirms they're in the master branch for performing the bisection.
1143 This is needed or gclient will fail to sync properly.
1145 Returns:
1146 True if the current branch on src is 'master'
1148 cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
1149 log_output = CheckRunGit(cmd)
1150 log_output = log_output.strip()
1152 return log_output == "master"
1154 def SVNFindRev(self, revision, cwd=None):
1155 """Maps directly to the 'git svn find-rev' command.
1157 Args:
1158 revision: The git SHA1 to use.
1160 Returns:
1161 An integer changelist #, otherwise None.
1164 cmd = ['svn', 'find-rev', revision]
1166 output = CheckRunGit(cmd, cwd)
1167 svn_revision = output.strip()
1169 if IsStringInt(svn_revision):
1170 return int(svn_revision)
1172 return None
1174 def QueryRevisionInfo(self, revision, cwd=None):
1175 """Gathers information on a particular revision, such as author's name,
1176 email, subject, and date.
1178 Args:
1179 revision: Revision you want to gather information on.
1180 Returns:
1181 A dict in the following format:
1183 'author': %s,
1184 'email': %s,
1185 'date': %s,
1186 'subject': %s,
1187 'body': %s,
1190 commit_info = {}
1192 formats = ['%cN', '%cE', '%s', '%cD', '%b']
1193 targets = ['author', 'email', 'subject', 'date', 'body']
1195 for i in xrange(len(formats)):
1196 cmd = ['log', '--format=%s' % formats[i], '-1', revision]
1197 output = CheckRunGit(cmd, cwd=cwd)
1198 commit_info[targets[i]] = output.rstrip()
1200 return commit_info
1202 def CheckoutFileAtRevision(self, file_name, revision, cwd=None):
1203 """Performs a checkout on a file at the given revision.
1205 Returns:
1206 True if successful.
1208 return not RunGit(['checkout', revision, file_name], cwd=cwd)[1]
1210 def RevertFileToHead(self, file_name):
1211 """Unstages a file and returns it to HEAD.
1213 Returns:
1214 True if successful.
1216 # Reset doesn't seem to return 0 on success.
1217 RunGit(['reset', 'HEAD', file_name])
1219 return not RunGit(['checkout', bisect_utils.FILE_DEPS_GIT])[1]
1221 def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
1222 """Returns a list of commits that modified this file.
1224 Args:
1225 filename: Name of file.
1226 revision_start: Start of revision range.
1227 revision_end: End of revision range.
1229 Returns:
1230 Returns a list of commits that touched this file.
1232 cmd = ['log', '--format=%H', '%s~1..%s' % (revision_start, revision_end),
1233 filename]
1234 output = CheckRunGit(cmd)
1236 return [o for o in output.split('\n') if o]
1239 class BisectPerformanceMetrics(object):
1240 """This class contains functionality to perform a bisection of a range of
1241 revisions to narrow down where performance regressions may have occurred.
1243 The main entry-point is the Run method.
  def __init__(self, source_control, opts):
    """Initializes the bisect state.

    Args:
      source_control: A SourceControl instance used for all VCS queries.
      opts: The parsed bisect options object.
    """
    super(BisectPerformanceMetrics, self).__init__()

    self.opts = opts
    self.source_control = source_control
    # The script is expected to start inside 'src'.
    self.src_cwd = os.getcwd()
    self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
    self.depot_cwd = {}
    self.cleanup_commands = []
    self.warnings = []
    self.builder = Builder.FromOpts(opts)

    # This always starts true since the script grabs latest first.
    self.was_blink = True

    for d in DEPOT_NAMES:
      # The working directory of each depot is just the path to the depot, but
      # since we're already in 'src', we can skip that part.
      self.depot_cwd[d] = os.path.join(
          self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
1268 def PerformCleanup(self):
1269 """Performs cleanup when script is finished."""
1270 os.chdir(self.src_cwd)
1271 for c in self.cleanup_commands:
1272 if c[0] == 'mv':
1273 shutil.move(c[1], c[2])
1274 else:
1275 assert False, 'Invalid cleanup command.'
  def GetRevisionList(self, depot, bad_revision, good_revision):
    """Retrieves a list of all the commits between the bad revision and
    last known good revision.

    Args:
      depot: The depot being bisected (e.g. 'chromium', 'cros').
      bad_revision: End of the range (known-bad revision).
      good_revision: Start of the range (last known-good revision).

    Returns:
      For 'cros', a list of unique commit timestamps sorted newest-first;
      for other depots, the revision list from source control.
    """
    revision_work_list = []

    if depot == 'cros':
      revision_range_start = good_revision
      revision_range_end = bad_revision

      cwd = os.getcwd()
      self.ChangeToDepotWorkingDirectory('cros')

      # Print the commit timestamps for every commit in the revision time
      # range. We'll sort them and bisect by that. There is a remote chance
      # that 2 (or more) commits will share the exact same timestamp, but it's
      # probably safe to ignore that case.
      cmd = ['repo', 'forall', '-c',
          'git log --format=%%ct --before=%d --after=%d' % (
          revision_range_end, revision_range_start)]
      (output, return_code) = RunProcessAndRetrieveOutput(cmd)

      assert not return_code, 'An error occurred while running'\
                              ' "%s"' % ' '.join(cmd)

      os.chdir(cwd)

      # De-duplicate across the many git repos repo forall walks.
      revision_work_list = list(set(
          [int(o) for o in output.split('\n') if IsStringInt(o)]))
      revision_work_list = sorted(revision_work_list, reverse=True)
    else:
      cwd = self._GetDepotDirectory(depot)
      revision_work_list = self.source_control.GetRevisionList(bad_revision,
          good_revision, cwd=cwd)

    return revision_work_list
  def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
    """Maps a V8 trunk revision to its bleeding_edge counterpart, if possible.

    Args:
      revision: A git hash on V8 trunk.

    Returns:
      The bleeding_edge git hash if the trunk commit maps cleanly,
      otherwise None.
    """
    svn_revision = self.source_control.SVNFindRev(revision)

    if IsStringInt(svn_revision):
      # V8 is tricky to bisect, in that there are only a few instances when
      # we can dive into bleeding_edge and get back a meaningful result.
      # Try to detect a V8 "business as usual" case, which is when:
      # 1. trunk revision N has description "Version X.Y.Z"
      # 2. bleeding_edge revision (N-1) has description "Prepare push to
      #    trunk. Now working on X.Y.(Z+1)."
      #
      # As of 01/24/2014, V8 trunk descriptions are formatted:
      # "Version 3.X.Y (based on bleeding_edge revision rZ)"
      # So we can just try parsing that out first and fall back to the old way.
      v8_dir = self._GetDepotDirectory('v8')
      v8_bleeding_edge_dir = self._GetDepotDirectory('v8_bleeding_edge')

      revision_info = self.source_control.QueryRevisionInfo(revision,
          cwd=v8_dir)

      version_re = re.compile("Version (?P<values>[0-9,.]+)")

      regex_results = version_re.search(revision_info['subject'])

      if regex_results:
        git_revision = None

        # Look for "based on bleeding_edge" and parse out revision
        if 'based on bleeding_edge' in revision_info['subject']:
          try:
            bleeding_edge_revision = revision_info['subject'].split(
                'bleeding_edge revision r')[1]
            bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
            git_revision = self.source_control.ResolveToRevision(
                bleeding_edge_revision, 'v8_bleeding_edge', 1,
                cwd=v8_bleeding_edge_dir)
            return git_revision
          except (IndexError, ValueError):
            # Subject didn't carry a parseable revision; fall through to the
            # legacy "Prepare push to trunk" heuristic below.
            pass

        if not git_revision:
          # Wasn't successful, try the old way of looking for "Prepare push to"
          git_revision = self.source_control.ResolveToRevision(
              int(svn_revision) - 1, 'v8_bleeding_edge', -1,
              cwd=v8_bleeding_edge_dir)

          if git_revision:
            revision_info = self.source_control.QueryRevisionInfo(git_revision,
                cwd=v8_bleeding_edge_dir)

            if 'Prepare push to trunk' in revision_info['subject']:
              return git_revision
    return None
1368 def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
1369 cwd = self._GetDepotDirectory('v8')
1370 cmd = ['log', '--format=%ct', '-1', revision]
1371 output = CheckRunGit(cmd, cwd=cwd)
1372 commit_time = int(output)
1373 commits = []
1375 if search_forward:
1376 cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
1377 'origin/master']
1378 output = CheckRunGit(cmd, cwd=cwd)
1379 output = output.split()
1380 commits = output
1381 commits = reversed(commits)
1382 else:
1383 cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
1384 'origin/master']
1385 output = CheckRunGit(cmd, cwd=cwd)
1386 output = output.split()
1387 commits = output
1389 bleeding_edge_revision = None
1391 for c in commits:
1392 bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)
1393 if bleeding_edge_revision:
1394 break
1396 return bleeding_edge_revision
1398 def _ParseRevisionsFromDEPSFileManually(self, deps_file_contents):
1399 """Manually parses the vars section of the DEPS file to determine
1400 chromium/blink/etc... revisions.
1402 Returns:
1403 A dict in the format {depot:revision} if successful, otherwise None.
1405 # We'll parse the "vars" section of the DEPS file.
1406 rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE)
1407 re_results = rxp.search(deps_file_contents)
1408 locals = {}
1410 if not re_results:
1411 return None
1413 # We should be left with a series of entries in the vars component of
1414 # the DEPS file with the following format:
1415 # 'depot_name': 'revision',
1416 vars_body = re_results.group('vars_body')
1417 rxp = re.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
1418 re.MULTILINE)
1419 re_results = rxp.findall(vars_body)
1421 return dict(re_results)
  def _ParseRevisionsFromDEPSFile(self, depot):
    """Parses the local DEPS file to determine blink/skia/v8 revisions which
    may be needed if the bisect recurses into those depots later.

    Args:
      depot: Depot being bisected.

    Returns:
      A dict in the format {depot:revision} if successful, otherwise None.
    """
    try:
      # Evaluate the DEPS file as Python; 'Var' resolves from the file's own
      # vars dict and 'From' entries are ignored.
      deps_data = {'Var': lambda _: deps_data["vars"][_],
                   'From': lambda *args: None
                  }
      execfile(bisect_utils.FILE_DEPS_GIT, {}, deps_data)
      deps_data = deps_data['deps']

      # Pinned git revisions appear as '...<repo>.git@<hash>' in deps entries.
      rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
      results = {}
      for depot_name, depot_data in DEPOT_DEPS_NAME.iteritems():
        # Skip depots pinned to a different host platform.
        if (depot_data.get('platform') and
            depot_data.get('platform') != os.name):
          continue

        # Only record depots the bisect could recurse into from |depot|.
        if (depot_data.get('recurse') and depot in depot_data.get('from')):
          depot_data_src = depot_data.get('src') or depot_data.get('src_old')
          src_dir = deps_data.get(depot_data_src)
          if src_dir:
            # Strip the leading 'src/' since we're already inside 'src'.
            self.depot_cwd[depot_name] = os.path.join(self.src_cwd,
                                                      depot_data_src[4:])
            re_results = rxp.search(src_dir)
            if re_results:
              results[depot_name] = re_results.group('revision')
            else:
              warning_text = ('Couldn\'t parse revision for %s while bisecting '
                              '%s' % (depot_name, depot))
              if not warning_text in self.warnings:
                self.warnings.append(warning_text)
          else:
            results[depot_name] = None
      return results
    except ImportError:
      # Older DEPS files can't be executed directly; fall back to a manual
      # regex parse of the vars section.
      deps_file_contents = ReadStringFromFile(bisect_utils.FILE_DEPS_GIT)
      parse_results = self._ParseRevisionsFromDEPSFileManually(
          deps_file_contents)
      results = {}
      for depot_name, depot_revision in parse_results.iteritems():
        depot_revision = depot_revision.strip('@')
        print depot_name, depot_revision
        # Map the DEPS var name back to the depot it belongs to.
        for current_name, current_data in DEPOT_DEPS_NAME.iteritems():
          if (current_data.has_key('deps_var') and
              current_data['deps_var'] == depot_name):
            src_name = current_name
            results[src_name] = depot_revision
            break
      return results
1480 def Get3rdPartyRevisionsFromCurrentRevision(self, depot, revision):
1481 """Parses the DEPS file to determine WebKit/v8/etc... versions.
1483 Returns:
1484 A dict in the format {depot:revision} if successful, otherwise None.
1486 cwd = os.getcwd()
1487 self.ChangeToDepotWorkingDirectory(depot)
1489 results = {}
1491 if depot == 'chromium' or depot == 'android-chrome':
1492 results = self._ParseRevisionsFromDEPSFile(depot)
1493 os.chdir(cwd)
1494 elif depot == 'cros':
1495 cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board,
1496 'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild',
1497 CROS_CHROMEOS_PATTERN]
1498 (output, return_code) = RunProcessAndRetrieveOutput(cmd)
1500 assert not return_code, 'An error occurred while running' \
1501 ' "%s"' % ' '.join(cmd)
1503 if len(output) > CROS_CHROMEOS_PATTERN:
1504 output = output[len(CROS_CHROMEOS_PATTERN):]
1506 if len(output) > 1:
1507 output = output.split('_')[0]
1509 if len(output) > 3:
1510 contents = output.split('.')
1512 version = contents[2]
1514 if contents[3] != '0':
1515 warningText = 'Chrome version: %s.%s but using %s.0 to bisect.' % \
1516 (version, contents[3], version)
1517 if not warningText in self.warnings:
1518 self.warnings.append(warningText)
1520 cwd = os.getcwd()
1521 self.ChangeToDepotWorkingDirectory('chromium')
1522 return_code = CheckRunGit(['log', '-1', '--format=%H',
1523 '--author=chrome-release@google.com', '--grep=to %s' % version,
1524 'origin/master'])
1525 os.chdir(cwd)
1527 results['chromium'] = output.strip()
1528 elif depot == 'v8':
1529 # We can't try to map the trunk revision to bleeding edge yet, because
1530 # we don't know which direction to try to search in. Have to wait until
1531 # the bisect has narrowed the results down to 2 v8 rolls.
1532 results['v8_bleeding_edge'] = None
1534 return results
1536 def BackupOrRestoreOutputdirectory(self, restore=False, build_type='Release'):
1537 """Backs up or restores build output directory based on restore argument.
1539 Args:
1540 restore: Indicates whether to restore or backup. Default is False(Backup)
1541 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1543 Returns:
1544 Path to backup or restored location as string. otherwise None if it fails.
1546 build_dir = os.path.abspath(
1547 self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1548 source_dir = os.path.join(build_dir, build_type)
1549 destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
1550 if restore:
1551 source_dir, destination_dir = destination_dir, source_dir
1552 if os.path.exists(source_dir):
1553 RmTreeAndMkDir(destination_dir, skip_makedir=True)
1554 shutil.move(source_dir, destination_dir)
1555 return destination_dir
1556 return None
1558 def GetBuildArchiveForRevision(self, revision, gs_bucket, target_arch,
1559 patch_sha, out_dir):
1560 """Checks and downloads build archive for a given revision.
1562 Checks for build archive with Git hash or SVN revision. If either of the
1563 file exists, then downloads the archive file.
1565 Args:
1566 revision: A Git hash revision.
1567 gs_bucket: Cloud storage bucket name
1568 target_arch: 32 or 64 bit build target
1569 patch: A DEPS patch (used while bisecting 3rd party repositories).
1570 out_dir: Build output directory where downloaded file is stored.
1572 Returns:
1573 Downloaded archive file path if exists, otherwise None.
1575 # Source archive file path on cloud storage using Git revision.
1576 source_file = GetRemoteBuildPath(
1577 revision, self.opts.target_platform, target_arch, patch_sha)
1578 downloaded_archive = FetchFromCloudStorage(gs_bucket, source_file, out_dir)
1579 if not downloaded_archive:
1580 # Get SVN revision for the given SHA.
1581 svn_revision = self.source_control.SVNFindRev(revision)
1582 if svn_revision:
1583 # Source archive file path on cloud storage using SVN revision.
1584 source_file = GetRemoteBuildPath(
1585 svn_revision, self.opts.target_platform, target_arch, patch_sha)
1586 return FetchFromCloudStorage(gs_bucket, source_file, out_dir)
1587 return downloaded_archive
1589 def DownloadCurrentBuild(self, revision, build_type='Release', patch=None):
1590 """Downloads the build archive for the given revision.
1592 Args:
1593 revision: The Git revision to download or build.
1594 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1595 patch: A DEPS patch (used while bisecting 3rd party repositories).
1597 Returns:
1598 True if download succeeds, otherwise False.
1600 patch_sha = None
1601 if patch:
1602 # Get the SHA of the DEPS changes patch.
1603 patch_sha = GetSHA1HexDigest(patch)
1605 # Update the DEPS changes patch with a patch to create a new file named
1606 # 'DEPS.sha' and add patch_sha evaluated above to it.
1607 patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})
1609 # Get Build output directory
1610 abs_build_dir = os.path.abspath(
1611 self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1613 fetch_build_func = lambda: self.GetBuildArchiveForRevision(
1614 revision, self.opts.gs_bucket, self.opts.target_arch,
1615 patch_sha, abs_build_dir)
1617 # Downloaded archive file path, downloads build archive for given revision.
1618 downloaded_file = fetch_build_func()
1620 # When build archive doesn't exists, post a build request to tryserver
1621 # and wait for the build to be produced.
1622 if not downloaded_file:
1623 downloaded_file = self.PostBuildRequestAndWait(
1624 revision, fetch_build=fetch_build_func, patch=patch)
1625 if not downloaded_file:
1626 return False
1628 # Generic name for the archive, created when archive file is extracted.
1629 output_dir = os.path.join(
1630 abs_build_dir, GetZipFileName(target_arch=self.opts.target_arch))
1631 # Unzip build archive directory.
1632 try:
1633 RmTreeAndMkDir(output_dir, skip_makedir=True)
1634 ExtractZip(downloaded_file, abs_build_dir)
1635 if os.path.exists(output_dir):
1636 self.BackupOrRestoreOutputdirectory(restore=False)
1637 # Build output directory based on target(e.g. out/Release, out/Debug).
1638 target_build_output_dir = os.path.join(abs_build_dir, build_type)
1639 print 'Moving build from %s to %s' % (
1640 output_dir, target_build_output_dir)
1641 shutil.move(output_dir, target_build_output_dir)
1642 return True
1643 raise IOError('Missing extracted folder %s ' % output_dir)
1644 except Exception as e:
1645 print 'Somewthing went wrong while extracting archive file: %s' % e
1646 self.BackupOrRestoreOutputdirectory(restore=True)
1647 # Cleanup any leftovers from unzipping.
1648 if os.path.exists(output_dir):
1649 RmTreeAndMkDir(output_dir, skip_makedir=True)
1650 finally:
1651 # Delete downloaded archive
1652 if os.path.exists(downloaded_file):
1653 os.remove(downloaded_file)
1654 return False
  def WaitUntilBuildIsReady(self, fetch_build, bot_name, builder_host,
                            builder_port, build_request_id, max_timeout):
    """Waits until build is produced by bisect builder on tryserver.

    Args:
      fetch_build: Function to check and download build from cloud storage.
      bot_name: Builder bot name on tryserver.
      builder_host: Tryserver hostname.
      builder_port: Tryserver port.
      build_request_id: A unique ID of the build request posted to tryserver.
      max_timeout: Maximum time to wait for the build.

    Returns:
      A (file_path, message) tuple: the downloaded archive path (or None on
      failure/timeout) and a human-readable status string.
    """
    # Build number on the tryserver.
    build_num = None
    # Interval to check build on cloud storage.
    poll_interval = 60
    # Interval to check build status on tryserver.
    status_check_interval = 600
    last_status_check = time.time()
    start_time = time.time()
    while True:
      # Checks for build on gs://chrome-perf and download if exists.
      res = fetch_build()
      if res:
        return (res, 'Build successfully found')
      elapsed_status_check = time.time() - last_status_check
      # To avoid overloading tryserver with status check requests, we check
      # build status for every 10 mins.
      if elapsed_status_check > status_check_interval:
        last_status_check = time.time()
        if not build_num:
          # Get the build number on tryserver for the current build.
          build_num = bisect_builder.GetBuildNumFromBuilder(
              build_request_id, bot_name, builder_host, builder_port)
        # Check the status of build using the build number.
        # Note: Build is treated as PENDING if build number is not found
        # on the tryserver.
        build_status, status_link = bisect_builder.GetBuildStatus(
            build_num, bot_name, builder_host, builder_port)
        if build_status == bisect_builder.FAILED:
          return (None, 'Failed to produce build, log: %s' % status_link)
      elapsed_time = time.time() - start_time
      if elapsed_time > max_timeout:
        return (None, 'Timed out: %ss without build' % max_timeout)

      print 'Time elapsed: %ss without build.' % elapsed_time
      time.sleep(poll_interval)
      # For some reason, mac bisect bots were not flushing stdout periodically.
      # As a result buildbot command is timed-out. Flush stdout on all platforms
      # while waiting for build.
      sys.stdout.flush()
  def PostBuildRequestAndWait(self, revision, fetch_build, patch=None):
    """POSTs the build request job to the tryserver instance.

    A try job build request is posted to tryserver.chromium.perf master,
    and waits for the binaries to be produced and archived on cloud storage.
    Once the build is ready and stored onto cloud, build archive is downloaded
    into the output folder.

    Args:
      revision: A Git hash revision.
      fetch_build: Function to check and download build from cloud storage.
      patch: A DEPS patch (used while bisecting 3rd party repositories).

    Returns:
      Downloaded archive file path when requested build exists and download is
      successful, otherwise None. (Returns False when |fetch_build| is not
      supplied.)

    Raises:
      RuntimeError: No SVN revision could be determined for |revision|.
      NotImplementedError: The host platform has no bisect builder.
    """
    # Get SVN revision for the given SHA.
    svn_revision = self.source_control.SVNFindRev(revision)
    if not svn_revision:
      raise RuntimeError(
          'Failed to determine SVN revision for %s' % revision)

    def GetBuilderNameAndBuildTime(target_platform, target_arch='ia32'):
      """Gets builder bot name and buildtime in seconds based on platform."""
      # Bot names should match the one listed in tryserver.chromium's
      # master.cfg which produces builds for bisect.
      if IsWindowsHost():
        if Is64BitWindows() and target_arch == 'x64':
          # NOTE(review): this branch currently returns the same bot as the
          # 32-bit case below — confirm whether a dedicated x64 builder
          # is intended here.
          return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
        return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
      if IsLinuxHost():
        if target_platform == 'android':
          return ('android_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
        return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
      if IsMacHost():
        return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME)
      raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)
    if not fetch_build:
      return False

    bot_name, build_timeout = GetBuilderNameAndBuildTime(
        self.opts.target_platform, self.opts.target_arch)
    builder_host = self.opts.builder_host
    builder_port = self.opts.builder_port
    # Create a unique ID for each build request posted to tryserver builders.
    # This ID is added to "Reason" property in build's json.
    build_request_id = GetSHA1HexDigest(
        '%s-%s-%s' % (svn_revision, patch, time.time()))

    # Creates a try job description.
    job_args = {'host': builder_host,
                'port': builder_port,
                'revision': 'src@%s' % svn_revision,
                'bot': bot_name,
                'name': build_request_id
               }
    # Update patch information if supplied.
    if patch:
      job_args['patch'] = patch
    # Posts job to build the revision on the server.
    if bisect_builder.PostTryJob(job_args):
      target_file, error_msg = self.WaitUntilBuildIsReady(fetch_build,
                                                          bot_name,
                                                          builder_host,
                                                          builder_port,
                                                          build_request_id,
                                                          build_timeout)
      if not target_file:
        print '%s [revision: %s]' % (error_msg, svn_revision)
        return None
      return target_file
    print 'Failed to post build request for revision: [%s]' % svn_revision
    return None
1786 def IsDownloadable(self, depot):
1787 """Checks if build is downloadable based on target platform and depot."""
1788 if (self.opts.target_platform in ['chromium', 'android'] and
1789 self.opts.gs_bucket):
1790 return (depot == 'chromium' or
1791 'chromium' in DEPOT_DEPS_NAME[depot]['from'] or
1792 'v8' in DEPOT_DEPS_NAME[depot]['from'])
1793 return False
  def UpdateDeps(self, revision, depot, deps_file):
    """Updates DEPS file with new revision of dependency repository.

    This method searches DEPS for a particular pattern in which the depot
    revision is specified (e.g. "webkit_revision": "123456"). If a match is
    found, it resolves the given git hash to an SVN revision and replaces it
    in the DEPS file.

    Args:
      revision: A git hash revision of the dependency repository.
      depot: Current depot being bisected.
      deps_file: Path to DEPS file.

    Returns:
      True if DEPS file is modified successfully, otherwise False.
      NOTE(review): if the revision pattern is not found and no exception is
      raised, this falls off the end and implicitly returns None (falsy).
    """
    if not os.path.exists(deps_file):
      return False

    deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
    # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
    if not deps_var:
      # NOTE(review): the comma makes this Python 2 print statement emit a
      # tuple rather than interpolating %s — likely meant to use '%'.
      print 'DEPS update not supported for Depot: %s', depot
      return False

    # Hack for the Angle repository because, in the DEPS file, the "vars"
    # dictionary variable contains an "angle_revision" key that holds a git
    # hash instead of an SVN revision. And sometimes the "angle_revision" key
    # is not specified in the "vars" variable; in such cases check the "deps"
    # dictionary variable that matches angle.git@[a-fA-F0-9]{40}$ and replace
    # the git hash.
    if depot == 'angle':
      return self.UpdateDEPSForAngle(revision, depot, deps_file)

    try:
      deps_contents = ReadStringFromFile(deps_file)
      # Check whether the depot and revision pattern in DEPS file vars
      # e.g. for webkit the format is "webkit_revision": "12345".
      deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_var,
                                 re.MULTILINE)
      match = re.search(deps_revision, deps_contents)
      if match:
        svn_revision = self.source_control.SVNFindRev(
            revision, self._GetDepotDirectory(depot))
        if not svn_revision:
          print 'Could not determine SVN revision for %s' % revision
          return False
        # Update the revision information for the given depot
        new_data = re.sub(deps_revision, str(svn_revision), deps_contents)

        # For v8_bleeding_edge revisions change V8 branch in order
        # to fetch bleeding edge revision.
        if depot == 'v8_bleeding_edge':
          new_data = self.UpdateV8Branch(new_data)
          if not new_data:
            return False
        # Write changes to DEPS file
        WriteStringToFile(new_data, deps_file)
        return True
    except IOError, e:
      print 'Something went wrong while updating DEPS file. [%s]' % e
      return False
1856 def UpdateV8Branch(self, deps_content):
1857 """Updates V8 branch in DEPS file to process v8_bleeding_edge.
1859 Check for "v8_branch" in DEPS file if exists update its value
1860 with v8_bleeding_edge branch. Note: "v8_branch" is added to DEPS
1861 variable from DEPS revision 254916, therefore check for "src/v8":
1862 <v8 source path> in DEPS in order to support prior DEPS revisions
1863 and update it.
1865 Args:
1866 deps_content: DEPS file contents to be modified.
1868 Returns:
1869 Modified DEPS file contents as a string.
1871 new_branch = r'branches/bleeding_edge'
1872 v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")')
1873 if re.search(v8_branch_pattern, deps_content):
1874 deps_content = re.sub(v8_branch_pattern, new_branch, deps_content)
1875 else:
1876 # Replaces the branch assigned to "src/v8" key in DEPS file.
1877 # Format of "src/v8" in DEPS:
1878 # "src/v8":
1879 # (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
1880 # So, "/trunk@" is replace with "/branches/bleeding_edge@"
1881 v8_src_pattern = re.compile(
1882 r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE)
1883 if re.search(v8_src_pattern, deps_content):
1884 deps_content = re.sub(v8_src_pattern, new_branch, deps_content)
1885 return deps_content
1887 def UpdateDEPSForAngle(self, revision, depot, deps_file):
1888 """Updates DEPS file with new revision for Angle repository.
1890 This is a hack for Angle depot case because, in DEPS file "vars" dictionary
1891 variable contains "angle_revision" key that holds git hash instead of
1892 SVN revision.
1894 And sometimes "angle_revision" key is not specified in "vars" variable,
1895 in such cases check "deps" dictionary variable that matches
1896 angle.git@[a-fA-F0-9]{40}$ and replace git hash.
1898 deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
1899 try:
1900 deps_contents = ReadStringFromFile(deps_file)
1901 # Check whether the depot and revision pattern in DEPS file vars variable
1902 # e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
1903 angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
1904 deps_var, re.MULTILINE)
1905 match = re.search(angle_rev_pattern % deps_var, deps_contents)
1906 if match:
1907 # Update the revision information for the given depot
1908 new_data = re.sub(angle_rev_pattern, revision, deps_contents)
1909 else:
1910 # Check whether the depot and revision pattern in DEPS file deps
1911 # variable. e.g.,
1912 # "src/third_party/angle": Var("chromium_git") +
1913 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
1914 angle_rev_pattern = re.compile(
1915 r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE)
1916 match = re.search(angle_rev_pattern, deps_contents)
1917 if not match:
1918 print 'Could not find angle revision information in DEPS file.'
1919 return False
1920 new_data = re.sub(angle_rev_pattern, revision, deps_contents)
1921 # Write changes to DEPS file
1922 WriteStringToFile(new_data, deps_file)
1923 return True
1924 except IOError, e:
1925 print 'Something went wrong while updating DEPS file, %s' % e
1926 return False
def CreateDEPSPatch(self, depot, revision):
  """Modifies DEPS and returns diff as text.

  Checks out DEPS at the current chromium revision, rewrites the entry for
  |depot| to point at |revision|, and captures the resulting git diff.

  Args:
    depot: Current depot being bisected.
    revision: A git hash revision of the dependency repository.

  Returns:
    A tuple with git hash of chromium revision and DEPS patch text.

  Raises:
    RuntimeError: If the DEPS file is missing, the chromium revision cannot
        be determined, the DEPS checkout fails, or the DEPS update fails.
  """
  deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
  if not os.path.exists(deps_file_path):
    raise RuntimeError('DEPS file does not exists.[%s]' % deps_file_path)
  # Get current chromium revision (git hash).
  chromium_sha = CheckRunGit(['rev-parse', 'HEAD']).strip()
  if not chromium_sha:
    raise RuntimeError('Failed to determine Chromium revision for %s' %
                       revision)
  # Only depots rolled from chromium or v8 have entries in chromium's DEPS
  # file that can be patched; anything else falls through to (None, None).
  if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or
      'v8' in DEPOT_DEPS_NAME[depot]['from']):
    # Checkout DEPS file for the current chromium revision.
    if self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS,
                                                  chromium_sha,
                                                  cwd=self.src_cwd):
      if self.UpdateDeps(revision, depot, deps_file_path):
        # src/ prefixes make the patch applicable from the parent of src.
        diff_command = ['diff',
                       '--src-prefix=src/',
                       '--dst-prefix=src/',
                       '--no-ext-diff',
                       bisect_utils.FILE_DEPS]
        diff_text = CheckRunGit(diff_command, cwd=self.src_cwd)
        return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
      else:
        raise RuntimeError('Failed to update DEPS file for chromium: [%s]' %
                           chromium_sha)
    else:
      raise RuntimeError('DEPS checkout Failed for chromium revision : [%s]' %
                         chromium_sha)
  return (None, None)
def BuildCurrentRevision(self, depot, revision=None):
  """Builds chrome and performance_ui_tests on the current revision.

  Args:
    depot: Depot being bisected; used to decide whether a prebuilt archive
        can be downloaded instead of building locally.
    revision: Optional revision used to fetch a prebuilt archive; when
        falsy, the local build path is taken.

  Returns:
    True if the build was successful.
  """
  if self.opts.debug_ignore_build:
    return True
  cwd = os.getcwd()
  os.chdir(self.src_cwd)
  # Fetch build archive for the given revision from the cloud storage when
  # the storage bucket is passed.
  if self.IsDownloadable(depot) and revision:
    deps_patch = None
    if depot != 'chromium':
      # Create a DEPS patch with new revision for dependency repository.
      # Note: |revision| is replaced by the chromium SHA the patch applies to.
      (revision, deps_patch) = self.CreateDEPSPatch(depot, revision)
    if self.DownloadCurrentBuild(revision, patch=deps_patch):
      os.chdir(cwd)
      if deps_patch:
        # Reverts the changes to DEPS file.
        self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS,
                                                   revision,
                                                   cwd=self.src_cwd)
      return True
    return False

  # These codes are executed when bisect bots builds binaries locally.
  build_success = self.builder.Build(depot, self.opts)
  os.chdir(cwd)
  return build_success
def RunGClientHooks(self):
  """Runs gclient with runhooks command.

  Returns:
    True if gclient reports no errors.
  """
  # Nothing to do when the bisect is run in build-debug mode.
  if self.opts.debug_ignore_build:
    return True
  runhooks_error = bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
  return not runhooks_error
def TryParseHistogramValuesFromOutput(self, metric, text):
  """Attempts to parse a metric in the format HISTOGRAM <graph>: <trace>.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])

  text_lines = text.split('\n')
  values_list = []

  for current_line in text_lines:
    if metric_formatted in current_line:
      current_line = current_line[len(metric_formatted):]

      try:
        # SECURITY NOTE: eval() on test output. The payload is expected to
        # be a dict literal with a 'buckets' list emitted by the test
        # harness; json.loads would be safer if the payload is guaranteed
        # to be JSON — TODO confirm before switching.
        histogram_values = eval(current_line)

        for b in histogram_values['buckets']:
          average_for_bucket = float(b['high'] + b['low']) * 0.5
          # Extends the list with N-elements with the average for that bucket.
          values_list.extend([average_for_bucket] * b['count'])
      except Exception:
        # FIX: was a bare `except:` which also swallowed KeyboardInterrupt
        # and SystemExit; malformed histogram lines are simply skipped.
        pass

  return values_list
def TryParseResultValuesFromOutput(self, metric, text):
  """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  # Format is: RESULT <graph>: <trace>= <value> <units>
  metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= <value>
  # (Raw strings used for all regexes; the originals relied on '\s' etc.
  # passing through unescaped, which is fragile.)
  single_result_re = re.compile(
      metric_re + r'\s*(?P<VALUE>[-]?\d*(\.\d*)?)')

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
  multi_results_re = re.compile(
      metric_re + r'\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
  mean_stddev_re = re.compile(
      metric_re +
      r'\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')

  text_lines = text.split('\n')
  values_list = []
  for current_line in text_lines:
    # Parse the output from the performance test for the metric we're
    # interested in.
    single_result_match = single_result_re.search(current_line)
    multi_results_match = multi_results_re.search(current_line)
    mean_stddev_match = mean_stddev_re.search(current_line)
    if (single_result_match is not None and
        single_result_match.group('VALUE')):
      values_list += [single_result_match.group('VALUE')]
    elif (multi_results_match is not None and
          multi_results_match.group('VALUES')):
      metric_values = multi_results_match.group('VALUES')
      values_list += metric_values.split(',')
    elif (mean_stddev_match is not None and
          mean_stddev_match.group('MEAN')):
      values_list += [mean_stddev_match.group('MEAN')]

  values_list = [float(v) for v in values_list if IsStringFloat(v)]

  # If the metric is times/t, we need to sum the timings in order to get
  # similar regression results as the try-bots.
  metrics_to_sum = [['times', 't'], ['times', 'page_load_time'],
      ['cold_times', 'page_load_time'], ['warm_times', 'page_load_time']]

  if metric in metrics_to_sum:
    if values_list:
      # Every entry is already a float here, so the original
      # reduce(lambda x, y: float(x) + float(y), ...) is exactly sum().
      values_list = [sum(values_list)]

  return values_list
def ParseMetricValuesFromOutput(self, metric, text):
  """Parses output from performance_ui_tests and retrieves the results for
  a given metric.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  # Prefer RESULT-formatted lines; fall back to HISTOGRAM lines only when
  # nothing was parsed.
  parsed_values = self.TryParseResultValuesFromOutput(metric, text)
  if parsed_values:
    return parsed_values
  return self.TryParseHistogramValuesFromOutput(metric, text)
2122 def _GenerateProfileIfNecessary(self, command_args):
2123 """Checks the command line of the performance test for dependencies on
2124 profile generation, and runs tools/perf/generate_profile as necessary.
2126 Args:
2127 command_args: Command line being passed to performance test, as a list.
2129 Returns:
2130 False if profile generation was necessary and failed, otherwise True.
2133 if '--profile-dir' in ' '.join(command_args):
2134 # If we were using python 2.7+, we could just use the argparse
2135 # module's parse_known_args to grab --profile-dir. Since some of the
2136 # bots still run 2.6, have to grab the arguments manually.
2137 arg_dict = {}
2138 args_to_parse = ['--profile-dir', '--browser']
2140 for arg_to_parse in args_to_parse:
2141 for i, current_arg in enumerate(command_args):
2142 if arg_to_parse in current_arg:
2143 current_arg_split = current_arg.split('=')
2145 # Check 2 cases, --arg=<val> and --arg <val>
2146 if len(current_arg_split) == 2:
2147 arg_dict[arg_to_parse] = current_arg_split[1]
2148 elif i + 1 < len(command_args):
2149 arg_dict[arg_to_parse] = command_args[i+1]
2151 path_to_generate = os.path.join('tools', 'perf', 'generate_profile')
2153 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'):
2154 profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
2155 return not RunProcess(['python', path_to_generate,
2156 '--profile-type-to-generate', profile_type,
2157 '--browser', arg_dict['--browser'], '--output-dir', profile_path])
2158 return False
2159 return True
def _IsBisectModeUsingMetric(self):
  # True when this bisect compares parsed metric values (mean or std-dev
  # modes) rather than return codes.
  return self.opts.bisect_mode in (BISECT_MODE_MEAN, BISECT_MODE_STD_DEV)
def _IsBisectModeReturnCode(self):
  # True when this bisect compares test exit codes instead of metric values.
  return self.opts.bisect_mode == BISECT_MODE_RETURN_CODE
def _IsBisectModeStandardDeviation(self):
  # True when this bisect tracks changes in standard deviation.
  return self.opts.bisect_mode == BISECT_MODE_STD_DEV
def RunPerformanceTestAndParseResults(
    self, command_to_run, metric, reset_on_first_run=False,
    upload_on_last_run=False, results_label=None):
  """Runs a performance test on the current revision and parses the results.

  Args:
    command_to_run: The command to be run to execute the performance test.
    metric: The metric to parse out from the results of the performance test.
        This is the result chart name and trace name, separated by slash.
    reset_on_first_run: If True, pass the flag --reset-results on first run.
    upload_on_last_run: If True, pass the flag --upload-results on last run.
    results_label: A value for the option flag --results-label.
    The arguments reset_on_first_run, upload_on_last_run and results_label
    are all ignored if the test is not a Telemetry test.

  Returns:
    (values dict, 0) if --debug_ignore_perf_test was passed.
    (values dict, 0, test output) if the test was run successfully.
    (error message, -1) if the test couldn't be run.
    (error message, -1, test output) if the test ran but there was an error.
  """
  success_code, failure_code = 0, -1

  if self.opts.debug_ignore_perf_test:
    # Short-circuit with zeroed stats so the rest of the bisect can proceed.
    fake_results = {
        'mean': 0.0,
        'std_err': 0.0,
        'std_dev': 0.0,
        'values': [0.0]
    }
    return (fake_results, success_code)

  # For Windows platform set posix=False, to parse windows paths correctly.
  # On Windows, path separators '\' or '\\' are replace by '' when posix=True,
  # refer to http://bugs.python.org/issue1724822. By default posix=True.
  args = shlex.split(command_to_run, posix=not IsWindowsHost())

  if not self._GenerateProfileIfNecessary(args):
    err_text = 'Failed to generate profile for performance test.'
    return (err_text, failure_code)

  # If running a Telemetry test for Chrome OS, insert the remote IP and
  # identity parameters.
  is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
  if self.opts.target_platform == 'cros' and is_telemetry:
    args.append('--remote=%s' % self.opts.cros_remote_ip)
    args.append('--identity=%s' % CROS_TEST_KEY_PATH)

  start_time = time.time()

  metric_values = []
  output_of_all_runs = ''
  for i in xrange(self.opts.repeat_test_count):
    # Can ignore the return code since if the tests fail, it won't return 0.
    current_args = copy.copy(args)
    if is_telemetry:
      if i == 0 and reset_on_first_run:
        current_args.append('--reset-results')
      elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
        current_args.append('--upload-results')
      if results_label:
        current_args.append('--results-label=%s' % results_label)
    try:
      (output, return_code) = RunProcessAndRetrieveOutput(current_args,
                                                          cwd=self.src_cwd)
    except OSError, e:
      if e.errno == errno.ENOENT:
        err_text = ('Something went wrong running the performance test. '
                    'Please review the command line:\n\n')
        if 'src/' in ' '.join(args):
          err_text += ('Check that you haven\'t accidentally specified a '
                       'path with src/ in the command.\n\n')
        err_text += ' '.join(args)
        err_text += '\n'

        return (err_text, failure_code)
      raise

    output_of_all_runs += output
    if self.opts.output_buildbot_annotations:
      print output

    if self._IsBisectModeUsingMetric():
      metric_values += self.ParseMetricValuesFromOutput(metric, output)
      # If we're bisecting on a metric (ie, changes in the mean or
      # standard deviation) and no metric values are produced, bail out.
      if not metric_values:
        break
    elif self._IsBisectModeReturnCode():
      metric_values.append(return_code)

    # Stop repeating once the time budget for this revision is exhausted.
    elapsed_minutes = (time.time() - start_time) / 60.0
    if elapsed_minutes >= self.opts.max_time_minutes:
      break

  if len(metric_values) == 0:
    err_text = 'Metric %s was not found in the test output.' % metric
    # TODO(qyearsley): Consider also getting and displaying a list of metrics
    # that were found in the output here.
    return (err_text, failure_code, output_of_all_runs)

  # If we're bisecting on return codes, we're really just looking for zero vs
  # non-zero.
  if self._IsBisectModeReturnCode():
    # If any of the return codes is non-zero, output 1.
    overall_return_code = 0 if (
        all(current_value == 0 for current_value in metric_values)) else 1

    values = {
        'mean': overall_return_code,
        'std_err': 0.0,
        'std_dev': 0.0,
        'values': metric_values,
    }

    print 'Results of performance test: Command returned with %d' % (
        overall_return_code)
    print
  else:
    # Need to get the average value if there were multiple values.
    truncated_mean = CalculateTruncatedMean(metric_values,
                                            self.opts.truncate_percent)
    standard_err = CalculateStandardError(metric_values)
    standard_dev = CalculateStandardDeviation(metric_values)

    if self._IsBisectModeStandardDeviation():
      metric_values = [standard_dev]

    values = {
        'mean': truncated_mean,
        'std_err': standard_err,
        'std_dev': standard_dev,
        'values': metric_values,
    }

    print 'Results of performance test: %12f %12f' % (
        truncated_mean, standard_err)
    print
  return (values, success_code, output_of_all_runs)
def FindAllRevisionsToSync(self, revision, depot):
  """Finds all dependant revisions and depots that need to be synced for a
  given revision. This is only useful in the git workflow, as an svn depot
  may be split into multiple mirrors.

  ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
  skia/include. To sync skia/src properly, one has to find the proper
  revisions in skia/gyp and skia/include.

  Args:
    revision: The revision to sync to.
    depot: The depot in use at the moment (probably skia).

  Returns:
    A list of [depot, revision] pairs that need to be synced, or None when
    not every dependant depot could be resolved.
  """
  revisions_to_sync = [[depot, revision]]

  # Base depots are never split across mirrors, so no resolution is needed.
  is_base = ((depot == 'chromium') or (depot == 'cros') or
      (depot == 'android-chrome'))

  # Some SVN depots were split into multiple git depots, so we need to
  # figure out for each mirror which git revision to grab. There's no
  # guarantee that the SVN revision will exist for each of the dependant
  # depots, so we have to grep the git logs and grab the next earlier one.
  if not is_base and\
     DEPOT_DEPS_NAME[depot]['depends'] and\
     self.source_control.IsGit():
    svn_rev = self.source_control.SVNFindRev(revision)

    for d in DEPOT_DEPS_NAME[depot]['depends']:
      self.ChangeToDepotWorkingDirectory(d)

      # -1000 is a search window passed through to ResolveToRevision;
      # exact semantics live there — confirm before relying on it.
      dependant_rev = self.source_control.ResolveToRevision(svn_rev, d, -1000)

      if dependant_rev:
        revisions_to_sync.append([d, dependant_rev])

    num_resolved = len(revisions_to_sync)
    num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])

    self.ChangeToDepotWorkingDirectory(depot)

    # The initial [depot, revision] entry doesn't count toward resolution;
    # fail unless every dependant depot resolved.
    if not ((num_resolved - 1) == num_needed):
      return None

  return revisions_to_sync
2358 def PerformPreBuildCleanup(self):
2359 """Performs necessary cleanup between runs."""
2360 print 'Cleaning up between runs.'
2361 print
2363 # Having these pyc files around between runs can confuse the
2364 # perf tests and cause them to crash.
2365 for (path, _, files) in os.walk(self.src_cwd):
2366 for cur_file in files:
2367 if cur_file.endswith('.pyc'):
2368 path_to_file = os.path.join(path, cur_file)
2369 os.remove(path_to_file)
def PerformWebkitDirectoryCleanup(self, revision):
  """If the script is switching between Blink and WebKit during bisect,
  its faster to just delete the directory rather than leave it up to git
  to sync.

  Args:
    revision: Revision whose .DEPS.git is inspected to decide whether it
        uses Blink.

  Returns:
    True if successful.
  """
  # Temporarily check out .DEPS.git at |revision| purely to inspect it.
  if not self.source_control.CheckoutFileAtRevision(
      bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
    return False

  cwd = os.getcwd()
  os.chdir(self.src_cwd)

  is_blink = bisect_utils.IsDepsFileBlink()

  os.chdir(cwd)

  # Put .DEPS.git back the way it was before the inspection.
  if not self.source_control.RevertFileToHead(
      bisect_utils.FILE_DEPS_GIT):
    return False

  # Only nuke third_party/Webkit when this revision crosses the
  # WebKit<->Blink boundary relative to the previous run.
  if self.was_blink != is_blink:
    self.was_blink = is_blink
    # Removes third_party/Webkit directory.
    return bisect_utils.RemoveThirdPartyDirectory('Webkit')
  return True
def PerformCrosChrootCleanup(self):
  """Deletes the chroot.

  Returns:
    True if successful.
  """
  original_cwd = os.getcwd()
  self.ChangeToDepotWorkingDirectory('cros')
  delete_return_code = RunProcess([CROS_SDK_PATH, '--delete'])
  os.chdir(original_cwd)
  return not delete_return_code
def CreateCrosChroot(self):
  """Creates a new chroot.

  Returns:
    True if successful.
  """
  original_cwd = os.getcwd()
  self.ChangeToDepotWorkingDirectory('cros')
  create_return_code = RunProcess([CROS_SDK_PATH, '--create'])
  os.chdir(original_cwd)
  return not create_return_code
def PerformPreSyncCleanup(self, revision, depot):
  """Performs any necessary cleanup before syncing.

  Args:
    revision: Revision being synced to (used by the webkit cleanup).
    depot: Depot about to be synced.

  Returns:
    True if successful.
  """
  if depot in ('chromium', 'android-chrome'):
    os.chdir(self.src_cwd)
    # Removes third_party/libjingle. At some point, libjingle was causing
    # issues syncing when using the git workflow (crbug.com/266324).
    # Removes third_party/skia. At some point, skia was causing
    # issues syncing when using the git workflow (crbug.com/377951).
    for third_party_dir in ('libjingle', 'skia'):
      if not bisect_utils.RemoveThirdPartyDirectory(third_party_dir):
        return False
    if depot == 'chromium':
      # The fast webkit cleanup doesn't work for android_chrome
      # The switch from Webkit to Blink that this deals with now happened
      # quite a long time ago so this is unlikely to be a problem.
      return self.PerformWebkitDirectoryCleanup(revision)
  elif depot == 'cros':
    return self.PerformCrosChrootCleanup()
  return True
def RunPostSync(self, depot):
  """Performs any work after syncing.

  Args:
    depot: Depot that was just synced.

  Returns:
    True if successful.
  """
  # Android bisects must reconfigure the build environment after each sync.
  if self.opts.target_platform == 'android':
    if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
        path_to_src=self.src_cwd):
      return False

  if depot == 'cros':
    return self.CreateCrosChroot()
  return self.RunGClientHooks()
def ShouldSkipRevision(self, depot, revision):
  """Some commits can be safely skipped (such as a DEPS roll), since the tool
  is git based those changes would have no effect.

  Args:
    depot: The depot being bisected.
    revision: Current revision we're synced to.

  Returns:
    True if we should skip building/testing this revision.
  """
  # Skipping only applies to chromium commits in a git checkout.
  if depot != 'chromium' or not self.source_control.IsGit():
    return False
  cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
  changed_files = CheckRunGit(cmd).splitlines()
  # A commit that touches nothing but DEPS is a roll we can skip.
  return changed_files == ['DEPS']
def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
                            skippable=False):
  """Performs a full sync/build/run of the specified revision.

  Args:
    revision: The revision to sync to.
    depot: The depot that's being used at the moment (src, webkit, etc.)
    command_to_run: The command to execute the performance test.
    metric: The performance metric being tested.
    skippable: When True, DEPS-only revisions may be skipped entirely.

  Returns:
    On success, a tuple containing the results of the performance test.
    Otherwise, a tuple with the error message.
  """
  # Pick the sync tool appropriate for the depot; non-base depots sync
  # with plain git (sync_client stays None).
  sync_client = None
  if depot == 'chromium' or depot == 'android-chrome':
    sync_client = 'gclient'
  elif depot == 'cros':
    sync_client = 'repo'

  revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)

  if not revisions_to_sync:
    return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)

  if not self.PerformPreSyncCleanup(revision, depot):
    return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)

  success = True

  if not self.opts.debug_ignore_sync:
    for r in revisions_to_sync:
      self.ChangeToDepotWorkingDirectory(r[0])

      if sync_client:
        self.PerformPreBuildCleanup()

      # If you're using gclient to sync, you need to specify the depot you
      # want so that all the dependencies sync properly as well.
      # ie. gclient sync src@<SHA1>
      current_revision = r[1]
      if sync_client == 'gclient':
        current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
                                      current_revision)
      if not self.source_control.SyncToRevision(current_revision,
                                                sync_client):
        success = False

        break

  if success:
    success = self.RunPostSync(depot)
    if success:
      if skippable and self.ShouldSkipRevision(depot, revision):
        return ('Skipped revision: [%s]' % str(revision),
                BUILD_RESULT_SKIPPED)

      start_build_time = time.time()
      if self.BuildCurrentRevision(depot, revision):
        after_build_time = time.time()
        results = self.RunPerformanceTestAndParseResults(command_to_run,
                                                         metric)
        # Restore build output directory once the tests are done, to avoid
        # any descrepancy.
        if self.IsDownloadable(depot) and revision:
          self.BackupOrRestoreOutputdirectory(restore=True)

        # results[1] == 0 is the success_code from the perf-test runner.
        if results[1] == 0:
          external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision(
              depot, revision)

          if not external_revisions is None:
            # Append perf time and build time to the successful result tuple.
            return (results[0], results[1], external_revisions,
                    time.time() - after_build_time, after_build_time -
                    start_build_time)
          else:
            return ('Failed to parse DEPS file for external revisions.',
                    BUILD_RESULT_FAIL)
        else:
          return results
      else:
        return ('Failed to build revision: [%s]' % (str(revision, )),
                BUILD_RESULT_FAIL)
    else:
      return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
  else:
    return ('Failed to sync revision: [%s]' % (str(revision, )),
            BUILD_RESULT_FAIL)
def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
  """Given known good and bad values, decide if the current_value passed
  or failed.

  Args:
    current_value: The value of the metric being checked.
    known_bad_value: The reference value for a "failed" run.
    known_good_value: The reference value for a "passed" run.

  Returns:
    True if the current_value is closer to the known_good_value than the
    known_bad_value.
  """
  # Compare standard deviations when bisecting on variance, means otherwise.
  if self.opts.bisect_mode == BISECT_MODE_STD_DEV:
    compare_key = 'std_dev'
  else:
    compare_key = 'mean'

  dist_to_good_value = abs(current_value[compare_key] -
                           known_good_value[compare_key])
  dist_to_bad_value = abs(current_value[compare_key] -
                          known_bad_value[compare_key])
  return dist_to_good_value < dist_to_bad_value
2604 def _GetDepotDirectory(self, depot_name):
2605 if depot_name == 'chromium':
2606 return self.src_cwd
2607 elif depot_name == 'cros':
2608 return self.cros_cwd
2609 elif depot_name in DEPOT_NAMES:
2610 return self.depot_cwd[depot_name]
2611 else:
2612 assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
2613 ' was added without proper support?' % depot_name
def ChangeToDepotWorkingDirectory(self, depot_name):
  """Given a depot, changes to the appropriate working directory.

  Args:
    depot_name: The name of the depot (see DEPOT_NAMES).
  """
  target_directory = self._GetDepotDirectory(depot_name)
  os.chdir(target_directory)
def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
  # Resolves both endpoints of the v8 bisect range to their nearest
  # bleeding_edge revisions and records them under each endpoint's
  # 'external' map. Search direction differs per endpoint (forward for the
  # minimum, backward for the maximum) — exact semantics live in
  # _GetNearestV8BleedingEdgeFromTrunk.
  r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'],
                                               search_forward=True)
  r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'],
                                               search_forward=False)
  min_revision_data['external']['v8_bleeding_edge'] = r1
  max_revision_data['external']['v8_bleeding_edge'] = r2

  # If either endpoint had no direct trunk->bleeding_edge mapping, the
  # nearest-roll substitution above may make results imprecise; warn.
  if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
          min_revision_data['revision']) or
      not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
          max_revision_data['revision'])):
    self.warnings.append('Trunk revisions in V8 did not map directly to '
        'bleeding_edge. Attempted to expand the range to find V8 rolls which '
        'did map directly to bleeding_edge revisions, but results might not '
        'be valid.')
def _FindNextDepotToBisect(self, current_depot, current_revision,
                           min_revision_data, max_revision_data):
  """Given the state of the bisect, decides which depot the script should
  dive into next (if any).

  Args:
    current_depot: Current depot being bisected.
    current_revision: Current revision synced to.
    min_revision_data: Data about the earliest revision in the bisect range.
    max_revision_data: Data about the latest revision in the bisect range.

  Returns:
    The depot to bisect next, or None.
  """
  external_depot = None
  for next_depot in DEPOT_NAMES:
    # FIX: dict.has_key() is deprecated; 'in' is the supported form.
    # A 'platform' entry restricts the depot to one OS (matched via os.name).
    if 'platform' in DEPOT_DEPS_NAME[next_depot]:
      if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
        continue

    # Only consider depots flagged for recursion that roll into the depot
    # currently being bisected.
    if not (DEPOT_DEPS_NAME[next_depot]['recurse'] and
        min_revision_data['depot'] in DEPOT_DEPS_NAME[next_depot]['from']):
      continue

    if current_depot == 'v8':
      # We grab the bleeding_edge info here rather than earlier because we
      # finally have the revision range. From that we can search forwards and
      # backwards to try to match trunk revisions to bleeding_edge.
      self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)

    # Identical external revisions at both endpoints means this depot did
    # not change across the range — nothing to bisect there.
    if (min_revision_data['external'].get(next_depot) ==
        max_revision_data['external'].get(next_depot)):
      continue

    if (min_revision_data['external'].get(next_depot) and
        max_revision_data['external'].get(next_depot)):
      external_depot = next_depot
      break

  return external_depot
def PrepareToBisectOnDepot(self,
                           current_depot,
                           end_revision,
                           start_revision,
                           previous_depot,
                           previous_revision):
  """Changes to the appropriate directory and gathers a list of revisions
  to bisect between |start_revision| and |end_revision|.

  Args:
    current_depot: The depot we want to bisect.
    end_revision: End of the revision range.
    start_revision: Start of the revision range.
    previous_depot: The depot we were previously bisecting.
    previous_revision: The last revision we synced to on |previous_depot|.

  Returns:
    A list containing the revisions between |start_revision| and
    |end_revision| inclusive.
  """
  # Change into working directory of external library to run
  # subsequent commands.
  self.ChangeToDepotWorkingDirectory(current_depot)

  # V8 (and possibly others) is merged in periodically. Bisecting
  # this directory directly won't give much good info.
  # FIX: dict.has_key() is deprecated; 'in' is the supported form.
  if 'custom_deps' in DEPOT_DEPS_NAME[current_depot]:
    config_path = os.path.join(self.src_cwd, '..')
    if bisect_utils.RunGClientAndCreateConfig(self.opts,
        DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
      return []
    if bisect_utils.RunGClient(
        ['sync', '--revision', previous_revision], cwd=self.src_cwd):
      return []

  if current_depot == 'v8_bleeding_edge':
    self.ChangeToDepotWorkingDirectory('chromium')

    # Swap the merged-in v8 checkout for the bleeding_edge one, and record
    # cleanup commands that undo the swap when the bisect finishes.
    shutil.move('v8', 'v8.bak')
    shutil.move('v8_bleeding_edge', 'v8')

    self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
    self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])

    self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
    self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')

    self.ChangeToDepotWorkingDirectory(current_depot)

  depot_revision_list = self.GetRevisionList(current_depot,
                                             end_revision,
                                             start_revision)

  self.ChangeToDepotWorkingDirectory('chromium')

  return depot_revision_list
def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
  """Gathers reference values by running the performance tests on the
  known good and bad revisions.

  Args:
    good_rev: The last known good revision where the performance regression
        has not occurred yet.
    bad_rev: A revision where the performance regression has already occurred.
    cmd: The command to execute the performance test.
    metric: The metric being tested for regression.
    target_depot: Depot the two revisions belong to.

  Returns:
    A tuple with the results of building and running each revision.
  """
  # Run the bad revision first; only run the good one if the bad revision
  # built and ran cleanly (falsy status in element [1]).
  bad_run_results = self.SyncBuildAndRunRevision(
      bad_rev, target_depot, cmd, metric)

  good_run_results = None
  if not bad_run_results[1]:
    good_run_results = self.SyncBuildAndRunRevision(
        good_rev, target_depot, cmd, metric)

  return (bad_run_results, good_run_results)
def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
  """Adds new revisions to the revision_data dict and initializes them.

  Args:
    revisions: List of revisions to add.
    depot: Depot that's currently in use (src, webkit, etc...)
    sort: Sorting key for displaying revisions.
    revision_data: A dict to add the new revisions into. Existing revisions
        will have their sort keys offset.
  """
  num_depot_revisions = len(revisions)

  # Shift every existing revision that sorts after the insertion point to
  # make room for the new entries.
  # (Was `for _, v in revision_data.iteritems()` with the key discarded;
  # .values() is the portable, clearer equivalent.)
  for v in revision_data.values():
    if v['sort'] > sort:
      v['sort'] += num_depot_revisions

  # (Was a hand-rolled `for i in xrange(...)` index loop; enumerate is the
  # idiomatic equivalent and works on any Python version.)
  for i, r in enumerate(revisions):
    revision_data[r] = {'revision' : r,
                        'depot' : depot,
                        'value' : None,
                        'perf_time' : 0,
                        'build_time' : 0,
                        'passed' : '?',
                        'sort' : i + sort + 1}
2795 def PrintRevisionsToBisectMessage(self, revision_list, depot):
2796 if self.opts.output_buildbot_annotations:
2797 step_name = 'Bisection Range: [%s - %s]' % (
2798 revision_list[len(revision_list)-1], revision_list[0])
2799 bisect_utils.OutputAnnotationStepStart(step_name)
2801 print
2802 print 'Revisions to bisect on [%s]:' % depot
2803 for revision_id in revision_list:
2804 print ' -> %s' % (revision_id, )
2805 print
2807 if self.opts.output_buildbot_annotations:
2808 bisect_utils.OutputAnnotationStepClosed()
def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
  """Checks to see if changes to DEPS file occurred, and that the revision
  range also includes the change to .DEPS.git. If it doesn't, attempts to
  expand the revision range to include it.

  Args:
    bad_revision: First known bad revision.
    good_revision: Last known good revision.

  Returns:
    A tuple with the new bad and good revisions.
  """
  # Nudging only applies to chromium git checkouts.
  if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
    changes_to_deps = self.source_control.QueryFileRevisionHistory(
        'DEPS', good_revision, bad_revision)

    if changes_to_deps:
      # DEPS file was changed, search from the oldest change to DEPS file to
      # bad_revision to see if there are matching .DEPS.git changes.
      oldest_deps_change = changes_to_deps[-1]
      changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
          bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)

      # A count mismatch means some DEPS change in range lacks its paired
      # .DEPS.git commit; try to widen the bad end to include it.
      if len(changes_to_deps) != len(changes_to_gitdeps):
        # Grab the timestamp of the last DEPS change
        cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
        output = CheckRunGit(cmd)
        commit_time = int(output)

        # Try looking for a commit that touches the .DEPS.git file in the
        # next 15 minutes after the DEPS file change.
        cmd = ['log', '--format=%H', '-1',
            '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
            'origin/master', bisect_utils.FILE_DEPS_GIT]
        output = CheckRunGit(cmd)
        output = output.strip()
        if output:
          self.warnings.append('Detected change to DEPS and modified '
              'revision range to include change to .DEPS.git')
          # The .DEPS.git commit becomes the new bad endpoint.
          return (output, good_revision)
        else:
          self.warnings.append('Detected change to DEPS but couldn\'t find '
              'matching change to .DEPS.git')
  return (bad_revision, good_revision)
2855 def CheckIfRevisionsInProperOrder(self,
2856 target_depot,
2857 good_revision,
2858 bad_revision):
2859 """Checks that |good_revision| is an earlier revision than |bad_revision|.
2861 Args:
2862 good_revision: Number/tag of the known good revision.
2863 bad_revision: Number/tag of the known bad revision.
2865 Returns:
2866 True if the revisions are in the proper order (good earlier than bad).
2868 if self.source_control.IsGit() and target_depot != 'cros':
2869 cmd = ['log', '--format=%ct', '-1', good_revision]
2870 cwd = self._GetDepotDirectory(target_depot)
2872 output = CheckRunGit(cmd, cwd=cwd)
2873 good_commit_time = int(output)
2875 cmd = ['log', '--format=%ct', '-1', bad_revision]
2876 output = CheckRunGit(cmd, cwd=cwd)
2877 bad_commit_time = int(output)
2879 return good_commit_time <= bad_commit_time
2880 else:
2881 # Cros/svn use integers
2882 return int(good_revision) <= int(bad_revision)
2884 def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
2885 """Given known good and bad revisions, run a binary search on all
2886 intermediate revisions to determine the CL where the performance regression
2887 occurred.
2889 Args:
2890 command_to_run: Specify the command to execute the performance test.
2891 good_revision: Number/tag of the known good revision.
2892 bad_revision: Number/tag of the known bad revision.
2893 metric: The performance metric to monitor.
2895 Returns:
2896 A dict with 2 members, 'revision_data' and 'error'. On success,
2897 'revision_data' will contain a dict mapping revision ids to
2898 data about that revision. Each piece of revision data consists of a
2899 dict with the following keys:
2901 'passed': Represents whether the performance test was successful at
2902 that revision. Possible values include: 1 (passed), 0 (failed),
2903 '?' (skipped), 'F' (build failed).
2904 'depot': The depot that this revision is from (ie. WebKit)
2905 'external': If the revision is a 'src' revision, 'external' contains
2906 the revisions of each of the external libraries.
2907 'sort': A sort value for sorting the dict in order of commits.
2909 For example:
2911 'error':None,
2912 'revision_data':
2914 'CL #1':
2916 'passed':False,
2917 'depot':'chromium',
2918 'external':None,
2919 'sort':0
2924 If an error occurred, the 'error' field will contain the message and
2925 'revision_data' will be empty.
2927 results = {'revision_data' : {},
2928 'error' : None}
2930 # Choose depot to bisect first
2931 target_depot = 'chromium'
2932 if self.opts.target_platform == 'cros':
2933 target_depot = 'cros'
2934 elif self.opts.target_platform == 'android-chrome':
2935 target_depot = 'android-chrome'
2937 cwd = os.getcwd()
2938 self.ChangeToDepotWorkingDirectory(target_depot)
2940 # If they passed SVN CL's, etc... we can try match them to git SHA1's.
2941 bad_revision = self.source_control.ResolveToRevision(bad_revision_in,
2942 target_depot, 100)
2943 good_revision = self.source_control.ResolveToRevision(good_revision_in,
2944 target_depot, -100)
2946 os.chdir(cwd)
2949 if bad_revision is None:
2950 results['error'] = 'Could\'t resolve [%s] to SHA1.' % (bad_revision_in,)
2951 return results
2953 if good_revision is None:
2954 results['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in,)
2955 return results
2957 # Check that they didn't accidentally swap good and bad revisions.
2958 if not self.CheckIfRevisionsInProperOrder(
2959 target_depot, good_revision, bad_revision):
2960 results['error'] = 'bad_revision < good_revision, did you swap these '\
2961 'by mistake?'
2962 return results
2964 (bad_revision, good_revision) = self.NudgeRevisionsIfDEPSChange(
2965 bad_revision, good_revision)
2967 if self.opts.output_buildbot_annotations:
2968 bisect_utils.OutputAnnotationStepStart('Gathering Revisions')
2970 print 'Gathering revision range for bisection.'
2971 # Retrieve a list of revisions to do bisection on.
2972 src_revision_list = self.GetRevisionList(target_depot,
2973 bad_revision,
2974 good_revision)
2976 if self.opts.output_buildbot_annotations:
2977 bisect_utils.OutputAnnotationStepClosed()
2979 if src_revision_list:
2980 # revision_data will store information about a revision such as the
2981 # depot it came from, the webkit/V8 revision at that time,
2982 # performance timing, build state, etc...
2983 revision_data = results['revision_data']
2985 # revision_list is the list we're binary searching through at the moment.
2986 revision_list = []
2988 sort_key_ids = 0
2990 for current_revision_id in src_revision_list:
2991 sort_key_ids += 1
2993 revision_data[current_revision_id] = {'value' : None,
2994 'passed' : '?',
2995 'depot' : target_depot,
2996 'external' : None,
2997 'perf_time' : 0,
2998 'build_time' : 0,
2999 'sort' : sort_key_ids}
3000 revision_list.append(current_revision_id)
3002 min_revision = 0
3003 max_revision = len(revision_list) - 1
3005 self.PrintRevisionsToBisectMessage(revision_list, target_depot)
3007 if self.opts.output_buildbot_annotations:
3008 bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')
3010 print 'Gathering reference values for bisection.'
3012 # Perform the performance tests on the good and bad revisions, to get
3013 # reference values.
3014 (bad_results, good_results) = self.GatherReferenceValues(good_revision,
3015 bad_revision,
3016 command_to_run,
3017 metric,
3018 target_depot)
3020 if self.opts.output_buildbot_annotations:
3021 bisect_utils.OutputAnnotationStepClosed()
3023 if bad_results[1]:
3024 results['error'] = ('An error occurred while building and running '
3025 'the \'bad\' reference value. The bisect cannot continue without '
3026 'a working \'bad\' revision to start from.\n\nError: %s' %
3027 bad_results[0])
3028 return results
3030 if good_results[1]:
3031 results['error'] = ('An error occurred while building and running '
3032 'the \'good\' reference value. The bisect cannot continue without '
3033 'a working \'good\' revision to start from.\n\nError: %s' %
3034 good_results[0])
3035 return results
3038 # We need these reference values to determine if later runs should be
3039 # classified as pass or fail.
3040 known_bad_value = bad_results[0]
3041 known_good_value = good_results[0]
3043 # Can just mark the good and bad revisions explicitly here since we
3044 # already know the results.
3045 bad_revision_data = revision_data[revision_list[0]]
3046 bad_revision_data['external'] = bad_results[2]
3047 bad_revision_data['perf_time'] = bad_results[3]
3048 bad_revision_data['build_time'] = bad_results[4]
3049 bad_revision_data['passed'] = False
3050 bad_revision_data['value'] = known_bad_value
3052 good_revision_data = revision_data[revision_list[max_revision]]
3053 good_revision_data['external'] = good_results[2]
3054 good_revision_data['perf_time'] = good_results[3]
3055 good_revision_data['build_time'] = good_results[4]
3056 good_revision_data['passed'] = True
3057 good_revision_data['value'] = known_good_value
3059 next_revision_depot = target_depot
3061 while True:
3062 if not revision_list:
3063 break
3065 min_revision_data = revision_data[revision_list[min_revision]]
3066 max_revision_data = revision_data[revision_list[max_revision]]
3068 if max_revision - min_revision <= 1:
3069 current_depot = min_revision_data['depot']
3070 if min_revision_data['passed'] == '?':
3071 next_revision_index = min_revision
3072 elif max_revision_data['passed'] == '?':
3073 next_revision_index = max_revision
3074 elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
3075 previous_revision = revision_list[min_revision]
3076 # If there were changes to any of the external libraries we track,
3077 # should bisect the changes there as well.
3078 external_depot = self._FindNextDepotToBisect(current_depot,
3079 previous_revision, min_revision_data, max_revision_data)
3081 # If there was no change in any of the external depots, the search
3082 # is over.
3083 if not external_depot:
3084 if current_depot == 'v8':
3085 self.warnings.append('Unfortunately, V8 bisection couldn\'t '
3086 'continue any further. The script can only bisect into '
3087 'V8\'s bleeding_edge repository if both the current and '
3088 'previous revisions in trunk map directly to revisions in '
3089 'bleeding_edge.')
3090 break
3092 earliest_revision = max_revision_data['external'][external_depot]
3093 latest_revision = min_revision_data['external'][external_depot]
3095 new_revision_list = self.PrepareToBisectOnDepot(external_depot,
3096 latest_revision,
3097 earliest_revision,
3098 next_revision_depot,
3099 previous_revision)
3101 if not new_revision_list:
3102 results['error'] = 'An error occurred attempting to retrieve'\
3103 ' revision range: [%s..%s]' % \
3104 (earliest_revision, latest_revision)
3105 return results
3107 self.AddRevisionsIntoRevisionData(new_revision_list,
3108 external_depot,
3109 min_revision_data['sort'],
3110 revision_data)
3112 # Reset the bisection and perform it on the newly inserted
3113 # changelists.
3114 revision_list = new_revision_list
3115 min_revision = 0
3116 max_revision = len(revision_list) - 1
3117 sort_key_ids += len(revision_list)
3119 print 'Regression in metric:%s appears to be the result of changes'\
3120 ' in [%s].' % (metric, external_depot)
3122 self.PrintRevisionsToBisectMessage(revision_list, external_depot)
3124 continue
3125 else:
3126 break
3127 else:
3128 next_revision_index = int((max_revision - min_revision) / 2) +\
3129 min_revision
3131 next_revision_id = revision_list[next_revision_index]
3132 next_revision_data = revision_data[next_revision_id]
3133 next_revision_depot = next_revision_data['depot']
3135 self.ChangeToDepotWorkingDirectory(next_revision_depot)
3137 if self.opts.output_buildbot_annotations:
3138 step_name = 'Working on [%s]' % next_revision_id
3139 bisect_utils.OutputAnnotationStepStart(step_name)
3141 print 'Working on revision: [%s]' % next_revision_id
3143 run_results = self.SyncBuildAndRunRevision(next_revision_id,
3144 next_revision_depot,
3145 command_to_run,
3146 metric, skippable=True)
3148 # If the build is successful, check whether or not the metric
3149 # had regressed.
3150 if not run_results[1]:
3151 if len(run_results) > 2:
3152 next_revision_data['external'] = run_results[2]
3153 next_revision_data['perf_time'] = run_results[3]
3154 next_revision_data['build_time'] = run_results[4]
3156 passed_regression = self._CheckIfRunPassed(run_results[0],
3157 known_good_value,
3158 known_bad_value)
3160 next_revision_data['passed'] = passed_regression
3161 next_revision_data['value'] = run_results[0]
3163 if passed_regression:
3164 max_revision = next_revision_index
3165 else:
3166 min_revision = next_revision_index
3167 else:
3168 if run_results[1] == BUILD_RESULT_SKIPPED:
3169 next_revision_data['passed'] = 'Skipped'
3170 elif run_results[1] == BUILD_RESULT_FAIL:
3171 next_revision_data['passed'] = 'Build Failed'
3173 print run_results[0]
3175 # If the build is broken, remove it and redo search.
3176 revision_list.pop(next_revision_index)
3178 max_revision -= 1
3180 if self.opts.output_buildbot_annotations:
3181 self._PrintPartialResults(results)
3182 bisect_utils.OutputAnnotationStepClosed()
3183 else:
3184 # Weren't able to sync and retrieve the revision range.
3185 results['error'] = 'An error occurred attempting to retrieve revision '\
3186 'range: [%s..%s]' % (good_revision, bad_revision)
3188 return results
3190 def _PrintPartialResults(self, results_dict):
3191 revision_data = results_dict['revision_data']
3192 revision_data_sorted = sorted(revision_data.iteritems(),
3193 key = lambda x: x[1]['sort'])
3194 results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
3196 self._PrintTestedCommitsTable(revision_data_sorted,
3197 results_dict['first_working_revision'],
3198 results_dict['last_broken_revision'],
3199 100, final_step=False)
3201 def _PrintConfidence(self, results_dict):
3202 # The perf dashboard specifically looks for the string
3203 # "Confidence in Bisection Results: 100%" to decide whether or not
3204 # to cc the author(s). If you change this, please update the perf
3205 # dashboard as well.
3206 print 'Confidence in Bisection Results: %d%%' % results_dict['confidence']
3208 def _PrintBanner(self, results_dict):
3209 print
3210 print " __o_\___ Aw Snap! We hit a speed bump!"
3211 print "=-O----O-'__.~.___________________________________"
3212 print
3213 if self._IsBisectModeReturnCode():
3214 print ('Bisect reproduced a change in return codes while running the '
3215 'performance test.')
3216 else:
3217 print ('Bisect reproduced a %.02f%% (+-%.02f%%) change in the '
3218 '%s metric.' % (results_dict['regression_size'],
3219 results_dict['regression_std_err'], '/'.join(self.opts.metric)))
3220 self._PrintConfidence(results_dict)
3222 def _PrintFailedBanner(self, results_dict):
3223 print
3224 if self._IsBisectModeReturnCode():
3225 print 'Bisect could not reproduce a change in the return code.'
3226 else:
3227 print ('Bisect could not reproduce a change in the '
3228 '%s metric.' % '/'.join(self.opts.metric))
3229 print
3231 def _GetViewVCLinkFromDepotAndHash(self, cl, depot):
3232 info = self.source_control.QueryRevisionInfo(cl,
3233 self._GetDepotDirectory(depot))
3234 if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'):
3235 try:
3236 # Format is "git-svn-id: svn://....@123456 <other data>"
3237 svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i]
3238 svn_revision = svn_line[0].split('@')
3239 svn_revision = svn_revision[1].split(' ')[0]
3240 return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision
3241 except IndexError:
3242 return ''
3243 return ''
  def _PrintRevisionInfo(self, cl, info, depot=None):
    """Prints details (subject, author, link, date) about a culprit CL.

    Args:
      cl: Commit id of the revision.
      info: Dict with 'subject', 'author', 'email', 'body' and 'date' keys,
          as returned by the source control's QueryRevisionInfo.
      depot: Optional depot name, used to build a viewvc link.
    """
    # The perf dashboard specifically looks for the string
    # "Author : " to parse out who to cc on a bug. If you change the
    # formatting here, please update the perf dashboard as well.
    print
    print 'Subject : %s' % info['subject']
    print 'Author : %s' % info['author']
    # Avoid printing a redundant line when the email is just the author name.
    if not info['email'].startswith(info['author']):
      print 'Email : %s' % info['email']
    commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
    if commit_link:
      print 'Link : %s' % commit_link
    else:
      print
      print 'Failed to parse svn revision from body:'
      print
      print info['body']
      print
    print 'Commit : %s' % cl
    print 'Date : %s' % info['date']
3266 def _PrintTableRow(self, column_widths, row_data):
3267 assert len(column_widths) == len(row_data)
3269 text = ''
3270 for i in xrange(len(column_widths)):
3271 current_row_data = row_data[i].center(column_widths[i], ' ')
3272 text += ('%%%ds' % column_widths[i]) % current_row_data
3273 print text
3275 def _PrintTestedCommitsHeader(self):
3276 if self.opts.bisect_mode == BISECT_MODE_MEAN:
3277 self._PrintTableRow(
3278 [20, 70, 14, 12, 13],
3279 ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State'])
3280 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
3281 self._PrintTableRow(
3282 [20, 70, 14, 12, 13],
3283 ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State'])
3284 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
3285 self._PrintTableRow(
3286 [20, 70, 14, 13],
3287 ['Depot', 'Commit SHA', 'Return Code', 'State'])
3288 else:
3289 assert False, "Invalid bisect_mode specified."
3290 print ' %20s %70s %14s %13s' % ('Depot'.center(20, ' '),
3291 'Commit SHA'.center(70, ' '), 'Return Code'.center(14, ' '),
3292 'State'.center(13, ' '))
3294 def _PrintTestedCommitsEntry(self, current_data, cl_link, state_str):
3295 if self.opts.bisect_mode == BISECT_MODE_MEAN:
3296 std_error = '+-%.02f' % current_data['value']['std_err']
3297 mean = '%.02f' % current_data['value']['mean']
3298 self._PrintTableRow(
3299 [20, 70, 12, 14, 13],
3300 [current_data['depot'], cl_link, mean, std_error, state_str])
3301 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
3302 std_error = '+-%.02f' % current_data['value']['std_err']
3303 mean = '%.02f' % current_data['value']['mean']
3304 self._PrintTableRow(
3305 [20, 70, 12, 14, 13],
3306 [current_data['depot'], cl_link, std_error, mean, state_str])
3307 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
3308 mean = '%d' % current_data['value']['mean']
3309 self._PrintTableRow(
3310 [20, 70, 14, 13],
3311 [current_data['depot'], cl_link, mean, state_str])
  def _PrintTestedCommitsTable(self, revision_data_sorted,
      first_working_revision, last_broken_revision, confidence,
      final_step=True):
    """Prints the table of tested commits along with their pass/fail state.

    Args:
      revision_data_sorted: (revision_id, data) pairs in commit order.
      first_working_revision: First revision id that passed.
      last_broken_revision: Last revision id that failed.
      confidence: Confidence percentage; a falsy value suppresses the
          good/bad state labels.
      final_step: True for the final results table, False for partial results.
    """
    print
    if final_step:
      print 'Tested commits:'
    else:
      print 'Partial results:'
    self._PrintTestedCommitsHeader()
    # state tracks where we are relative to the suspected range:
    # 0 = bad side, 1 = suspected CL range, 2 = good side.
    state = 0
    for current_id, current_data in revision_data_sorted:
      if current_data['value']:
        if (current_id == last_broken_revision or
            current_id == first_working_revision):
          # If confidence is too low, don't add this empty line since it's
          # used to put focus on a suspected CL.
          if confidence and final_step:
            print
          state += 1
          if state == 2 and not final_step:
            # Just want a separation between "bad" and "good" cl's.
            print

        state_str = 'Bad'
        if state == 1 and final_step:
          state_str = 'Suspected CL'
        elif state == 2:
          state_str = 'Good'

        # If confidence is too low, don't bother outputting good/bad.
        if not confidence:
          state_str = ''
        state_str = state_str.center(13, ' ')

        cl_link = self._GetViewVCLinkFromDepotAndHash(current_id,
            current_data['depot'])
        if not cl_link:
          # Fall back to the raw revision id when no viewable URL exists.
          cl_link = current_id
        self._PrintTestedCommitsEntry(current_data, cl_link, state_str)
3353 def _PrintReproSteps(self):
3354 print
3355 print 'To reproduce locally:'
3356 print '$ ' + self.opts.command
3357 if bisect_utils.IsTelemetryCommand(self.opts.command):
3358 print
3359 print 'Also consider passing --profiler=list to see available profilers.'
3361 def _PrintOtherRegressions(self, other_regressions, revision_data):
3362 print
3363 print 'Other regressions may have occurred:'
3364 print ' %8s %70s %10s' % ('Depot'.center(8, ' '),
3365 'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
3366 for regression in other_regressions:
3367 current_id, previous_id, confidence = regression
3368 current_data = revision_data[current_id]
3369 previous_data = revision_data[previous_id]
3371 current_link = self._GetViewVCLinkFromDepotAndHash(current_id,
3372 current_data['depot'])
3373 previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id,
3374 previous_data['depot'])
3376 # If we can't map it to a viewable URL, at least show the original hash.
3377 if not current_link:
3378 current_link = current_id
3379 if not previous_link:
3380 previous_link = previous_id
3382 print ' %8s %70s %s' % (
3383 current_data['depot'], current_link,
3384 ('%d%%' % confidence).center(10, ' '))
3385 print ' %8s %70s' % (
3386 previous_data['depot'], previous_link)
3387 print
3389 def _PrintStepTime(self, revision_data_sorted):
3390 step_perf_time_avg = 0.0
3391 step_build_time_avg = 0.0
3392 step_count = 0.0
3393 for _, current_data in revision_data_sorted:
3394 if current_data['value']:
3395 step_perf_time_avg += current_data['perf_time']
3396 step_build_time_avg += current_data['build_time']
3397 step_count += 1
3398 if step_count:
3399 step_perf_time_avg = step_perf_time_avg / step_count
3400 step_build_time_avg = step_build_time_avg / step_count
3401 print
3402 print 'Average build time : %s' % datetime.timedelta(
3403 seconds=int(step_build_time_avg))
3404 print 'Average test time : %s' % datetime.timedelta(
3405 seconds=int(step_perf_time_avg))
3407 def _PrintWarnings(self):
3408 if not self.warnings:
3409 return
3410 print
3411 print 'WARNINGS:'
3412 for w in set(self.warnings):
3413 print ' !!! %s' % w
  def _FindOtherRegressions(self, revision_data_sorted, bad_greater_than_good):
    """Scans the bisect results for other possible regression ranges.

    Args:
      revision_data_sorted: (revision_id, data) pairs in commit order.
      bad_greater_than_good: True if the overall regression is an increase
          in the metric (bad mean greater than good mean).

    Returns:
      A list of [current_id, previous_id, confidence] entries, one per
      adjacent change that moved in the same direction as the overall
      regression with confidence > 50.
    """
    other_regressions = []
    # previous_values accumulates the value lists of all earlier revisions
    # that had results; each candidate is compared against that history.
    previous_values = []
    previous_id = None
    for current_id, current_data in revision_data_sorted:
      current_values = current_data['value']
      if current_values:
        current_values = current_values['values']
        if previous_values:
          confidence = CalculateConfidence(previous_values, [current_values])
          mean_of_prev_runs = CalculateMean(sum(previous_values, []))
          mean_of_current_runs = CalculateMean(current_values)

          # Check that the potential regression is in the same direction as
          # the overall regression. If the mean of the previous runs < the
          # mean of the current runs, this local regression is in same
          # direction.
          prev_less_than_current = mean_of_prev_runs < mean_of_current_runs
          is_same_direction = (prev_less_than_current if
              bad_greater_than_good else not prev_less_than_current)

          # Only report potential regressions with high confidence.
          if is_same_direction and confidence > 50:
            other_regressions.append([current_id, previous_id, confidence])
        previous_values.append(current_values)
        previous_id = current_id
    return other_regressions
3444 def _GetResultsDict(self, revision_data, revision_data_sorted):
3445 # Find range where it possibly broke.
3446 first_working_revision = None
3447 first_working_revision_index = -1
3448 last_broken_revision = None
3449 last_broken_revision_index = -1
3451 for i in xrange(len(revision_data_sorted)):
3452 k, v = revision_data_sorted[i]
3453 if v['passed'] == 1:
3454 if not first_working_revision:
3455 first_working_revision = k
3456 first_working_revision_index = i
3458 if not v['passed']:
3459 last_broken_revision = k
3460 last_broken_revision_index = i
3462 if last_broken_revision != None and first_working_revision != None:
3463 broken_means = []
3464 for i in xrange(0, last_broken_revision_index + 1):
3465 if revision_data_sorted[i][1]['value']:
3466 broken_means.append(revision_data_sorted[i][1]['value']['values'])
3468 working_means = []
3469 for i in xrange(first_working_revision_index, len(revision_data_sorted)):
3470 if revision_data_sorted[i][1]['value']:
3471 working_means.append(revision_data_sorted[i][1]['value']['values'])
3473 # Flatten the lists to calculate mean of all values.
3474 working_mean = sum(working_means, [])
3475 broken_mean = sum(broken_means, [])
3477 # Calculate the approximate size of the regression
3478 mean_of_bad_runs = CalculateMean(broken_mean)
3479 mean_of_good_runs = CalculateMean(working_mean)
3481 regression_size = 100 * CalculateRelativeChange(mean_of_good_runs,
3482 mean_of_bad_runs)
3483 if math.isnan(regression_size):
3484 regression_size = 'zero-to-nonzero'
3486 regression_std_err = math.fabs(CalculatePooledStandardError(
3487 [working_mean, broken_mean]) /
3488 max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0
3490 # Give a "confidence" in the bisect. At the moment we use how distinct the
3491 # values are before and after the last broken revision, and how noisy the
3492 # overall graph is.
3493 confidence = CalculateConfidence(working_means, broken_means)
3495 culprit_revisions = []
3497 cwd = os.getcwd()
3498 self.ChangeToDepotWorkingDirectory(
3499 revision_data[last_broken_revision]['depot'])
3501 if revision_data[last_broken_revision]['depot'] == 'cros':
3502 # Want to get a list of all the commits and what depots they belong
3503 # to so that we can grab info about each.
3504 cmd = ['repo', 'forall', '-c',
3505 'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
3506 last_broken_revision, first_working_revision + 1)]
3507 (output, return_code) = RunProcessAndRetrieveOutput(cmd)
3509 changes = []
3510 assert not return_code, 'An error occurred while running'\
3511 ' "%s"' % ' '.join(cmd)
3512 last_depot = None
3513 cwd = os.getcwd()
3514 for l in output.split('\n'):
3515 if l:
3516 # Output will be in form:
3517 # /path_to_depot
3518 # /path_to_other_depot
3519 # <SHA1>
3520 # /path_again
3521 # <SHA1>
3522 # etc.
3523 if l[0] == '/':
3524 last_depot = l
3525 else:
3526 contents = l.split(' ')
3527 if len(contents) > 1:
3528 changes.append([last_depot, contents[0]])
3529 for c in changes:
3530 os.chdir(c[0])
3531 info = self.source_control.QueryRevisionInfo(c[1])
3532 culprit_revisions.append((c[1], info, None))
3533 else:
3534 for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
3535 k, v = revision_data_sorted[i]
3536 if k == first_working_revision:
3537 break
3538 self.ChangeToDepotWorkingDirectory(v['depot'])
3539 info = self.source_control.QueryRevisionInfo(k)
3540 culprit_revisions.append((k, info, v['depot']))
3541 os.chdir(cwd)
3543 # Check for any other possible regression ranges
3544 other_regressions = self._FindOtherRegressions(revision_data_sorted,
3545 mean_of_bad_runs > mean_of_good_runs)
3547 return {
3548 'first_working_revision': first_working_revision,
3549 'last_broken_revision': last_broken_revision,
3550 'culprit_revisions': culprit_revisions,
3551 'other_regressions': other_regressions,
3552 'regression_size': regression_size,
3553 'regression_std_err': regression_std_err,
3554 'confidence': confidence,
3557 def _CheckForWarnings(self, results_dict):
3558 if len(results_dict['culprit_revisions']) > 1:
3559 self.warnings.append('Due to build errors, regression range could '
3560 'not be narrowed down to a single commit.')
3561 if self.opts.repeat_test_count == 1:
3562 self.warnings.append('Tests were only set to run once. This may '
3563 'be insufficient to get meaningful results.')
3564 if results_dict['confidence'] < 100:
3565 if results_dict['confidence']:
3566 self.warnings.append(
3567 'Confidence is less than 100%. There could be other candidates '
3568 'for this regression. Try bisecting again with increased '
3569 'repeat_count or on a sub-metric that shows the regression more '
3570 'clearly.')
3571 else:
3572 self.warnings.append(
3573 'Confidence is 0%. Try bisecting again on another platform, with '
3574 'increased repeat_count or on a sub-metric that shows the '
3575 'regression more clearly.')
  def FormatAndPrintResults(self, bisect_results):
    """Prints the results from a bisection run in a readable format.

    Args:
      bisect_results: The results from a bisection test run.
    """
    revision_data = bisect_results['revision_data']
    revision_data_sorted = sorted(revision_data.iteritems(),
                                  key = lambda x: x[1]['sort'])
    results_dict = self._GetResultsDict(revision_data, revision_data_sorted)

    self._CheckForWarnings(results_dict)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')

    print
    print 'Full results of bisection:'
    for current_id, current_data in revision_data_sorted:
      build_status = current_data['passed']

      # 'passed' may also hold strings like '?' or 'Build Failed'; only
      # booleans are mapped to Good/Bad.
      if type(build_status) is bool:
        if build_status:
          build_status = 'Good'
        else:
          build_status = 'Bad'

      print ' %20s %40s %s' % (current_data['depot'],
          current_id, build_status)
    print

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')

    if results_dict['culprit_revisions'] and results_dict['confidence']:
      self._PrintBanner(results_dict)
      for culprit in results_dict['culprit_revisions']:
        cl, info, depot = culprit
        self._PrintRevisionInfo(cl, info, depot)
      self._PrintReproSteps()
      if results_dict['other_regressions']:
        self._PrintOtherRegressions(results_dict['other_regressions'],
            revision_data)
    else:
      self._PrintFailedBanner(results_dict)
      self._PrintReproSteps()

    self._PrintTestedCommitsTable(revision_data_sorted,
        results_dict['first_working_revision'],
        results_dict['last_broken_revision'],
        results_dict['confidence'])
    self._PrintStepTime(revision_data_sorted)
    self._PrintWarnings()

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
def DetermineAndCreateSourceControl(opts):
  """Attempts to determine the underlying source control workflow and returns
  a SourceControl object.

  Returns:
    An instance of a SourceControl object, or None if the current workflow
    is unsupported.
  """
  output, _ = RunGit(['rev-parse', '--is-inside-work-tree'])

  # 'git rev-parse --is-inside-work-tree' prints 'true' inside a checkout.
  if output.strip() != 'true':
    return None
  return GitSourceControl(opts)
def IsPlatformSupported(opts):
  """Checks that this platform and build system are supported.

  Args:
    opts: The options parsed from the command line.

  Returns:
    True if the platform and build system are supported.
  """
  # Haven't tested the script out on any other platforms yet.
  return os.name in ('posix', 'nt')
def RmTreeAndMkDir(path_to_dir, skip_makedir=False):
  """Removes the directory tree specified, and then creates an empty
  directory in the same location (if not specified to skip).

  Args:
    path_to_dir: Path to the directory tree.
    skip_makedir: Whether to skip creating empty directory, default is False.

  Returns:
    True if successful, False if an error occurred.
  """
  try:
    if os.path.exists(path_to_dir):
      shutil.rmtree(path_to_dir)
  except OSError as e:
    # A concurrent deletion (ENOENT) is harmless; anything else is a failure.
    if e.errno != errno.ENOENT:
      return False

  if not skip_makedir:
    return MaybeMakeDirectory(path_to_dir)

  return True
def RemoveBuildFiles(build_type):
  """Removes build files from previous runs."""
  # Short-circuits: 'build' is only cleared if clearing 'out' succeeded.
  return bool(RmTreeAndMkDir(os.path.join('out', build_type)) and
              RmTreeAndMkDir(os.path.join('build', build_type)))
3701 class BisectOptions(object):
3702 """Options to be used when running bisection."""
  def __init__(self):
    """Initializes every bisect option to its default value."""
    super(BisectOptions, self).__init__()

    self.target_platform = 'chromium'
    self.build_preference = None
    self.good_revision = None
    self.bad_revision = None
    self.use_goma = None
    self.cros_board = None
    self.cros_remote_ip = None
    # Test-repetition parameters; see the corresponding command-line options
    # for the valid ranges they are clamped to.
    self.repeat_test_count = 20
    self.truncate_percent = 25
    self.max_time_minutes = 20
    self.metric = None
    self.command = None
    self.output_buildbot_annotations = None
    self.no_custom_deps = False
    self.working_directory = None
    self.extra_src = None
    # Debug flags that skip the expensive build/sync/test steps.
    self.debug_ignore_build = None
    self.debug_ignore_sync = None
    self.debug_ignore_perf_test = None
    self.gs_bucket = None
    self.target_arch = 'ia32'
    self.target_build_type = 'Release'
    self.builder_host = None
    self.builder_port = None
    self.bisect_mode = BISECT_MODE_MEAN
3732 def _CreateCommandLineParser(self):
3733 """Creates a parser with bisect options.
3735 Returns:
3736 An instance of optparse.OptionParser.
3738 usage = ('%prog [options] [-- chromium-options]\n'
3739 'Perform binary search on revision history to find a minimal '
3740 'range of revisions where a peformance metric regressed.\n')
3742 parser = optparse.OptionParser(usage=usage)
3744 group = optparse.OptionGroup(parser, 'Bisect options')
3745 group.add_option('-c', '--command',
3746 type='str',
3747 help='A command to execute your performance test at' +
3748 ' each point in the bisection.')
3749 group.add_option('-b', '--bad_revision',
3750 type='str',
3751 help='A bad revision to start bisection. ' +
3752 'Must be later than good revision. May be either a git' +
3753 ' or svn revision.')
3754 group.add_option('-g', '--good_revision',
3755 type='str',
3756 help='A revision to start bisection where performance' +
3757 ' test is known to pass. Must be earlier than the ' +
3758 'bad revision. May be either a git or svn revision.')
3759 group.add_option('-m', '--metric',
3760 type='str',
3761 help='The desired metric to bisect on. For example ' +
3762 '"vm_rss_final_b/vm_rss_f_b"')
3763 group.add_option('-r', '--repeat_test_count',
3764 type='int',
3765 default=20,
3766 help='The number of times to repeat the performance '
3767 'test. Values will be clamped to range [1, 100]. '
3768 'Default value is 20.')
3769 group.add_option('--max_time_minutes',
3770 type='int',
3771 default=20,
3772 help='The maximum time (in minutes) to take running the '
3773 'performance tests. The script will run the performance '
3774 'tests according to --repeat_test_count, so long as it '
3775 'doesn\'t exceed --max_time_minutes. Values will be '
3776 'clamped to range [1, 60].'
3777 'Default value is 20.')
3778 group.add_option('-t', '--truncate_percent',
3779 type='int',
3780 default=25,
3781 help='The highest/lowest % are discarded to form a '
3782 'truncated mean. Values will be clamped to range [0, '
3783 '25]. Default value is 25 (highest/lowest 25% will be '
3784 'discarded).')
3785 group.add_option('--bisect_mode',
3786 type='choice',
3787 choices=[BISECT_MODE_MEAN, BISECT_MODE_STD_DEV,
3788 BISECT_MODE_RETURN_CODE],
3789 default=BISECT_MODE_MEAN,
3790 help='The bisect mode. Choices are to bisect on the '
3791 'difference in mean, std_dev, or return_code.')
3792 parser.add_option_group(group)
3794 group = optparse.OptionGroup(parser, 'Build options')
3795 group.add_option('-w', '--working_directory',
3796 type='str',
3797 help='Path to the working directory where the script '
3798 'will do an initial checkout of the chromium depot. The '
3799 'files will be placed in a subdirectory "bisect" under '
3800 'working_directory and that will be used to perform the '
3801 'bisection. This parameter is optional, if it is not '
3802 'supplied, the script will work from the current depot.')
3803 group.add_option('--build_preference',
3804 type='choice',
3805 choices=['msvs', 'ninja', 'make'],
3806 help='The preferred build system to use. On linux/mac '
3807 'the options are make/ninja. On Windows, the options '
3808 'are msvs/ninja.')
3809 group.add_option('--target_platform',
3810 type='choice',
3811 choices=['chromium', 'cros', 'android', 'android-chrome'],
3812 default='chromium',
3813 help='The target platform. Choices are "chromium" '
3814 '(current platform), "cros", or "android". If you '
3815 'specify something other than "chromium", you must be '
3816 'properly set up to build that platform.')
3817 group.add_option('--no_custom_deps',
3818 dest='no_custom_deps',
3819 action="store_true",
3820 default=False,
3821 help='Run the script with custom_deps or not.')
3822 group.add_option('--extra_src',
3823 type='str',
3824 help='Path to a script which can be used to modify '
3825 'the bisect script\'s behavior.')
3826 group.add_option('--cros_board',
3827 type='str',
3828 help='The cros board type to build.')
3829 group.add_option('--cros_remote_ip',
3830 type='str',
3831 help='The remote machine to image to.')
3832 group.add_option('--use_goma',
3833 action="store_true",
3834 help='Add a bunch of extra threads for goma, and enable '
3835 'goma')
3836 group.add_option('--output_buildbot_annotations',
3837 action="store_true",
3838 help='Add extra annotation output for buildbot.')
3839 group.add_option('--gs_bucket',
3840 default='',
3841 dest='gs_bucket',
3842 type='str',
3843 help=('Name of Google Storage bucket to upload or '
3844 'download build. e.g., chrome-perf'))
3845 group.add_option('--target_arch',
3846 type='choice',
3847 choices=['ia32', 'x64', 'arm'],
3848 default='ia32',
3849 dest='target_arch',
3850 help=('The target build architecture. Choices are "ia32" '
3851 '(default), "x64" or "arm".'))
3852 group.add_option('--target_build_type',
3853 type='choice',
3854 choices=['Release', 'Debug'],
3855 default='Release',
3856 help='The target build type. Choices are "Release" '
3857 '(default), or "Debug".')
3858 group.add_option('--builder_host',
3859 dest='builder_host',
3860 type='str',
3861 help=('Host address of server to produce build by posting'
3862 ' try job request.'))
3863 group.add_option('--builder_port',
3864 dest='builder_port',
3865 type='int',
3866 help=('HTTP port of the server to produce build by posting'
3867 ' try job request.'))
3868 parser.add_option_group(group)
3870 group = optparse.OptionGroup(parser, 'Debug options')
3871 group.add_option('--debug_ignore_build',
3872 action="store_true",
3873 help='DEBUG: Don\'t perform builds.')
3874 group.add_option('--debug_ignore_sync',
3875 action="store_true",
3876 help='DEBUG: Don\'t perform syncs.')
3877 group.add_option('--debug_ignore_perf_test',
3878 action="store_true",
3879 help='DEBUG: Don\'t perform performance tests.')
3880 parser.add_option_group(group)
3881 return parser
3883 def ParseCommandLine(self):
3884 """Parses the command line for bisect options."""
3885 parser = self._CreateCommandLineParser()
3886 (opts, _) = parser.parse_args()
3888 try:
3889 if not opts.command:
3890 raise RuntimeError('missing required parameter: --command')
3892 if not opts.good_revision:
3893 raise RuntimeError('missing required parameter: --good_revision')
3895 if not opts.bad_revision:
3896 raise RuntimeError('missing required parameter: --bad_revision')
3898 if not opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE:
3899 raise RuntimeError('missing required parameter: --metric')
3901 if opts.gs_bucket:
3902 if not cloud_storage.List(opts.gs_bucket):
3903 raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket)
3904 if not opts.builder_host:
3905 raise RuntimeError('Must specify try server hostname, when '
3906 'gs_bucket is used: --builder_host')
3907 if not opts.builder_port:
3908 raise RuntimeError('Must specify try server port number, when '
3909 'gs_bucket is used: --builder_port')
3910 if opts.target_platform == 'cros':
3911 # Run sudo up front to make sure credentials are cached for later.
3912 print 'Sudo is required to build cros:'
3913 print
3914 RunProcess(['sudo', 'true'])
3916 if not opts.cros_board:
3917 raise RuntimeError('missing required parameter: --cros_board')
3919 if not opts.cros_remote_ip:
3920 raise RuntimeError('missing required parameter: --cros_remote_ip')
3922 if not opts.working_directory:
3923 raise RuntimeError('missing required parameter: --working_directory')
3925 metric_values = opts.metric.split('/')
3926 if (len(metric_values) != 2 and
3927 opts.bisect_mode != BISECT_MODE_RETURN_CODE):
3928 raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)
3930 opts.metric = metric_values
3931 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3932 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3933 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3934 opts.truncate_percent = opts.truncate_percent / 100.0
3936 for k, v in opts.__dict__.iteritems():
3937 assert hasattr(self, k), "Invalid %s attribute in BisectOptions." % k
3938 setattr(self, k, v)
3939 except RuntimeError, e:
3940 output_string = StringIO.StringIO()
3941 parser.print_help(file=output_string)
3942 error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
3943 output_string.close()
3944 raise RuntimeError(error_message)
3946 @staticmethod
3947 def FromDict(values):
3948 """Creates an instance of BisectOptions with the values parsed from a
3949 .cfg file.
3951 Args:
3952 values: a dict containing options to set.
3954 Returns:
3955 An instance of BisectOptions.
3957 opts = BisectOptions()
3958 for k, v in values.iteritems():
3959 assert hasattr(opts, k), 'Invalid %s attribute in '\
3960 'BisectOptions.' % k
3961 setattr(opts, k, v)
3963 metric_values = opts.metric.split('/')
3964 if len(metric_values) != 2:
3965 raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)
3967 opts.metric = metric_values
3968 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3969 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3970 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3971 opts.truncate_percent = opts.truncate_percent / 100.0
3973 return opts
def main():
  """Entry point: parses options, sets up the checkout, and runs bisection.

  Returns:
    0 on a successful bisect run, 1 if a RuntimeError was raised anywhere
    in setup or bisection (the error is printed, optionally wrapped in
    buildbot annotations).
  """

  try:
    opts = BisectOptions()
    opts.ParseCommandLine()

    if opts.extra_src:
      # An extra_src script can register additional depots to bisect into.
      extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
      if not extra_src:
        raise RuntimeError("Invalid or missing --extra_src.")
      _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())

    if opts.working_directory:
      # Fresh checkout mode: create the bisect directory, sync the depot,
      # and work from its 'src' subdirectory instead of the current depot.
      custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
      if opts.no_custom_deps:
        custom_deps = None
      bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)

      os.chdir(os.path.join(os.getcwd(), 'src'))

      # Start from a clean slate so stale artifacts can't skew results.
      if not RemoveBuildFiles(opts.target_build_type):
        raise RuntimeError('Something went wrong removing the build files.')

    if not IsPlatformSupported(opts):
      raise RuntimeError("Sorry, this platform isn't supported yet.")

    # Check what source control method they're using. Only support git workflow
    # at the moment.
    source_control = DetermineAndCreateSourceControl(opts)

    if not source_control:
      raise RuntimeError("Sorry, only the git workflow is supported at the "
          "moment.")

    # gClient sync seems to fail if you're not in master branch.
    if (not source_control.IsInProperBranch() and
        not opts.debug_ignore_sync and
        not opts.working_directory):
      raise RuntimeError("You must switch to master branch to run bisection.")
    bisect_test = BisectPerformanceMetrics(source_control, opts)
    try:
      bisect_results = bisect_test.Run(opts.command,
                                       opts.bad_revision,
                                       opts.good_revision,
                                       opts.metric)
      if bisect_results['error']:
        raise RuntimeError(bisect_results['error'])
      bisect_test.FormatAndPrintResults(bisect_results)
      return 0
    finally:
      # Always clean up (e.g. restore working state) even on failure.
      bisect_test.PerformCleanup()
  except RuntimeError, e:
    if opts.output_buildbot_annotations:
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')
    print 'Error: %s' % e.message
    if opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
    return 1
# Script entry point: exit status is main()'s return code (0 ok, 1 error).
if __name__ == '__main__':
  sys.exit(main())