Add an #else to #ifdef OFFICIAL_BUILD
[chromium-blink-merge.git] / tools / bisect-perf-regression.py
blob7444d3700321c08f42f489157f561cf8e9f6836f
1 #!/usr/bin/env python
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
13 further bisect changes to those depots and attempt to narrow down the revision
14 range.
17 An example usage (using svn cl's):
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
28 An example usage (using git hashes):
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
36 """
38 import copy
39 import datetime
40 import errno
41 import hashlib
42 import math
43 import optparse
44 import os
45 import re
46 import shlex
47 import shutil
48 import StringIO
49 import subprocess
50 import sys
51 import time
52 import zipfile
54 sys.path.append(os.path.join(os.path.dirname(__file__), 'telemetry'))
56 import bisect_utils
57 import post_perf_builder_job as bisect_builder
58 from telemetry.page import cloud_storage
# The additional repositories that might need to be bisected.
# If the repository has any dependent repositories (such as skia/src needs
# skia/include and skia/gyp to be updated), specify them in the 'depends'
# so that they're synced appropriately.
# Format is:
# src: path to the working directory.
# recurse: True if this repository will get bisected.
# depends: A list of other repositories that are actually part of the same
#   repository in svn.
# svn: Needed for git workflow to resolve hashes to svn revisions.
# from: Parent depot that must be bisected before this is bisected.
# deps_var: Key name in the vars variable in the DEPS file that has revision
#   information.
DEPOT_DEPS_NAME = {
  'chromium' : {
    "src" : "src",
    "recurse" : True,
    "depends" : None,
    "from" : ['cros', 'android-chrome'],
    'viewvc': 'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
    'deps_var': 'chromium_rev'
  },
  'webkit' : {
    "src" : "src/third_party/WebKit",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'http://src.chromium.org/viewvc/blink?view=revision&revision=',
    'deps_var': 'webkit_revision'
  },
  'angle' : {
    "src" : "src/third_party/angle",
    # NOTE(review): src_old appears to be an earlier checkout path for this
    # depot — confirm against ANGLE's directory history before relying on it.
    "src_old" : "src/third_party/angle_dx11",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    "platform": 'nt',
    'deps_var': 'angle_revision'
  },
  'v8' : {
    "src" : "src/v8",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    "custom_deps": bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'v8_bleeding_edge' : {
    "src" : "src/v8_bleeding_edge",
    "recurse" : True,
    "depends" : None,
    "svn": "https://v8.googlecode.com/svn/branches/bleeding_edge",
    "from" : ['v8'],
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'skia/src' : {
    "src" : "src/third_party/skia/src",
    "recurse" : True,
    "svn" : "http://skia.googlecode.com/svn/trunk/src",
    "depends" : ['skia/include', 'skia/gyp'],
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'skia_revision'
  },
  'skia/include' : {
    "src" : "src/third_party/skia/include",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/include",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  },
  'skia/gyp' : {
    "src" : "src/third_party/skia/gyp",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/gyp",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  },
}

DEPOT_NAMES = DEPOT_DEPS_NAME.keys()

# Paths used when bisecting ChromeOS ('cros') targets.
CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
CROS_VERSION_PATTERN = 'new version number from %s'
CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
                                  'testing_rsa')
CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
                                    'mod_for_test_scripts', 'ssh_keys',
                                    'testing_rsa')

# Build outcome codes returned by the build/archive-download steps.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2

# Maximum time in seconds to wait after posting build request to tryserver.
# TODO: Change these values based on the actual time taken by buildbots on
# the tryserver.
MAX_MAC_BUILD_TIME = 14400
MAX_WIN_BUILD_TIME = 14400
MAX_LINUX_BUILD_TIME = 14400

# Patch template to add a new file, DEPS.sha under src folder.
# This file contains the SHA1 value of the DEPS changes made while bisecting
# dependency repositories. This patch is sent along with the DEPS patch to the
# tryserver. When a build request is posted with a patch, bisect builders on
# the tryserver, once the build is produced, read the SHA value from this file
# and append it to the build archive filename.
DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha
new file mode 100644
--- /dev/null
+++ src/DEPS.sha
@@ -0,0 +1 @@
+%(deps_sha)s
"""

# The possible values of the --bisect_mode flag, which determines what to
# use when classifying a revision as "good" or "bad".
BISECT_MODE_MEAN = 'mean'
BISECT_MODE_STD_DEV = 'std_dev'
BISECT_MODE_RETURN_CODE = 'return_code'
def _AddAdditionalDepotInfo(depot_info):
  """Merges extra depot entries into the global depot tables.

  Args:
    depot_info: A dict of depot-name -> depot-config entries to add.
  """
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  merged = dict(DEPOT_DEPS_NAME.items() + depot_info.items())
  DEPOT_DEPS_NAME = merged
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
def CalculateTruncatedMean(data_set, truncate_percent):
  """Calculates the truncated mean of a set of values.

  Note that this isn't just the mean of the set of values with the highest
  and lowest values discarded; the non-discarded values are also weighted
  differently depending how many values are discarded.

  Args:
    data_set: Non-empty list of values.
    truncate_percent: The % from the upper and lower portions of the data set
        to discard, expressed as a value in [0, 1].

  Returns:
    The truncated mean as a float.

  Raises:
    TypeError: The data set was empty after discarding values.
  """
  if len(data_set) > 2:
    data_set = sorted(data_set)

    # Number of values to drop from each end; may be fractional.
    discard_num_float = len(data_set) * truncate_percent
    discard_num_int = int(math.floor(discard_num_float))
    # Total weight of the values kept after (possibly fractional) trimming.
    kept_weight = len(data_set) - discard_num_float * 2

    # Drop the whole-number part of the trim from both ends.
    data_set = data_set[discard_num_int:len(data_set)-discard_num_int]

    # Fractional remainder of the trim; the outermost surviving value on
    # each end only counts for this fraction of a sample.
    weight_left = 1.0 - (discard_num_float - discard_num_int)

    if weight_left < 1:
      # If the % to discard leaves a fractional portion, need to weight those
      # values.
      unweighted_vals = data_set[1:len(data_set)-1]
      weighted_vals = [data_set[0], data_set[len(data_set)-1]]
      weighted_vals = [w * weight_left for w in weighted_vals]
      data_set = weighted_vals + unweighted_vals
  else:
    # Too few samples to trim anything; plain mean over all values.
    kept_weight = len(data_set)

  # reduce raises TypeError on an empty sequence, per the docstring.
  truncated_mean = reduce(lambda x, y: float(x) + float(y),
                          data_set) / kept_weight

  return truncated_mean
def CalculateMean(values):
  """Returns the arithmetic mean of |values| (a 0%-truncated mean)."""
  return CalculateTruncatedMean(values, truncate_percent=0.0)
def CalculateConfidence(good_results_lists, bad_results_lists):
  """Calculates a confidence percentage.

  This is calculated based on how distinct the "good" and "bad" values are,
  and how noisy the results are. More precisely, the confidence is the quotient
  of the difference between the closest values across the good and bad groups
  and the sum of the standard deviations of the good and bad groups.

  TODO(qyearsley): Replace this confidence function with a function that
  uses a Student's t-test. The confidence would be (1 - p-value), where
  p-value is the probability of obtaining the given a set of good and bad
  values just by chance.

  Args:
    good_results_lists: A list of lists of "good" result numbers.
    bad_results_lists: A list of lists of "bad" result numbers.

  Returns:
    A number between in the range [0, 100].
  """
  # Distance between the closest per-list means across the two groups.
  good_means = map(CalculateMean, good_results_lists)
  bad_means = map(CalculateMean, bad_results_lists)
  dist_between_groups = min(
      math.fabs(max(bad_means) - min(good_means)),
      math.fabs(min(bad_means) - max(good_means)))

  # Noise estimate: sum of the standard deviations of all good and all bad
  # samples pooled together.
  all_good = sum(good_results_lists, [])
  all_bad = sum(bad_results_lists, [])
  stddev_sum = (CalculateStandardDeviation(all_good) +
                CalculateStandardDeviation(all_bad))

  # Guard against division by zero when both groups are noiseless, then clamp
  # the ratio into [0, 1] and express it as a percentage.
  ratio = dist_between_groups / (max(0.0001, stddev_sum))
  return int(min(1.0, max(ratio, 0.0)) * 100.0)
def CalculateStandardDeviation(values):
  """Calculates the sample standard deviation of the given list of values."""
  if len(values) == 1:
    # A single sample has no spread (avoids dividing by n-1 == 0).
    return 0.0

  mean = CalculateMean(values)
  sum_of_squares = sum(float(v - mean) * float(v - mean) for v in values)
  variance = sum_of_squares / (len(values) - 1)
  return math.sqrt(variance)
def CalculateRelativeChange(before, after):
  """Returns the relative change of before and after, relative to before.

  There are several different ways to define relative difference between
  two numbers; sometimes it is defined as relative to the smaller number,
  or to the mean of the two numbers. This version returns the difference
  relative to the first of the two numbers.

  Args:
    before: A number representing an earlier value.
    after: Another number, representing a later value.

  Returns:
    A non-negative floating point number; 0.1 represents a 10% change.
  """
  # Equal values are a 0% change even when before == 0.
  if before == after:
    return 0.0
  if before == 0:
    # Undefined; use NaN rather than raising ZeroDivisionError.
    return float('nan')
  return math.fabs((after - before) / before)
def CalculatePooledStandardError(work_sets):
  """Calculates the pooled standard error across several sample sets.

  Args:
    work_sets: A list of lists of sample values.

  Returns:
    The pooled standard error as a float (0.0 if there are no degrees of
    freedom, e.g. every set has a single sample).
  """
  sum_weighted_variance = 0.0
  total_dof = 0.0          # Sum of per-set degrees of freedom (n - 1).
  sum_inverse_sizes = 0.0  # Sum of 1/n over the sets.

  for samples in work_sets:
    std_dev = CalculateStandardDeviation(samples)
    sum_weighted_variance += (len(samples) - 1) * std_dev ** 2
    total_dof += len(samples) - 1
    sum_inverse_sizes += 1.0 / len(samples)

  if total_dof:
    return (math.sqrt(sum_weighted_variance / total_dof) *
            math.sqrt(sum_inverse_sizes))
  return 0.0
def CalculateStandardError(values):
  """Calculates the standard error of the mean of a list of values."""
  if len(values) <= 1:
    return 0.0
  return CalculateStandardDeviation(values) / math.sqrt(len(values))
def IsStringFloat(string_to_check):
  """Checks whether or not the given string can be converted to a floating
  point number.

  Args:
    string_to_check: Input string to check if it can be converted to a float.

  Returns:
    True if the string can be converted to a float.
  """
  try:
    float(string_to_check)
  except ValueError:
    return False
  return True
def IsStringInt(string_to_check):
  """Checks whether or not the given string can be converted to an integer.

  Args:
    string_to_check: Input string to check if it can be converted to an int.

  Returns:
    True if the string can be converted to an int.
  """
  try:
    int(string_to_check)
  except ValueError:
    return False
  return True
def IsWindows():
  """Checks whether or not the script is running on Windows.

  Returns:
    True if running on Windows (including cygwin).
  """
  return sys.platform.startswith('win') or sys.platform == 'cygwin'
def Is64BitWindows():
  """Returns whether or not Windows is a 64-bit version.

  PROCESSOR_ARCHITEW6432 is set when running as a 32-bit process under
  WoW64 and reflects the real OS architecture; otherwise
  PROCESSOR_ARCHITECTURE is already correct.

  Returns:
    True if Windows is 64-bit, False if 32-bit or undeterminable.
  """
  # Use .get() so a missing variable (e.g. on non-Windows hosts, where
  # PROCESSOR_ARCHITECTURE is not set) yields False instead of raising
  # KeyError as the previous implementation did.
  platform = os.environ.get('PROCESSOR_ARCHITEW6432',
                            os.environ.get('PROCESSOR_ARCHITECTURE'))
  return platform in ['AMD64', 'I64']
def IsLinux():
  """Checks whether or not the script is running on Linux.

  Returns:
    True if running on Linux.
  """
  return sys.platform.startswith('linux')
def IsMac():
  """Checks whether or not the script is running on Mac.

  Returns:
    True if running on Mac.
  """
  return sys.platform.startswith('darwin')
def GetSHA1HexDigest(contents):
  """Returns the SHA-1 hash of |contents| as a hexadecimal string."""
  digest = hashlib.sha1(contents)
  return digest.hexdigest()
def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None):
  """Gets the archive file name for the given revision."""
  def PlatformName():
    """Return a string to be used in paths for the platform."""
    if IsWindows():
      # The build archive for x64 is still stored with the 'win32' suffix
      # (chromium_utils.PlatformName()), so both architectures map to the
      # same name.
      return 'win32'
    if IsLinux():
      return 'linux'
    if IsMac():
      return 'mac'
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)

  base_name = 'full-build-%s' % PlatformName()
  if not build_revision:
    return base_name
  if patch_sha:
    # Patched builds are disambiguated by the patch's SHA in the name.
    build_revision = '%s_%s' % (build_revision , patch_sha)
  return '%s_%s.zip' % (base_name, build_revision)
def GetRemoteBuildPath(build_revision, target_arch='ia32', patch_sha=None):
  """Computes the cloud-storage path to download the build from."""
  def GetGSRootFolderName():
    """Gets the Google Cloud Storage root folder name for this platform."""
    if IsWindows():
      if Is64BitWindows() and target_arch == 'x64':
        return 'Win x64 Builder'
      return 'Win Builder'
    if IsLinux():
      return 'Linux Builder'
    if IsMac():
      return 'Mac Builder'
    raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

  archive_name = GetZipFileName(build_revision, target_arch, patch_sha)
  return '%s/%s' % (GetGSRootFolderName(), archive_name)
def FetchFromCloudStorage(bucket_name, source_path, destination_path):
  """Fetches file(s) from the Google Cloud Storage.

  Args:
    bucket_name: Google Storage bucket name.
    source_path: Source file path.
    destination_path: Destination file path.

  Returns:
    True if the fetching succeeds, otherwise False.
  """
  target_file = os.path.join(destination_path, os.path.basename(source_path))
  try:
    if cloud_storage.Exists(bucket_name, source_path):
      print 'Fetching file from gs//%s/%s ...' % (bucket_name, source_path)
      cloud_storage.Get(bucket_name, source_path, destination_path)
      # Only report success if the file actually landed on disk.
      if os.path.exists(target_file):
        return True
    else:
      print ('File gs://%s/%s not found in cloud storage.' % (
          bucket_name, source_path))
  except Exception as e:
    # Best-effort download: any cloud/storage error is reported, a partial
    # download is removed, and the failure is signalled via the return value.
    print 'Something went wrong while fetching file from cloud: %s' % e
    if os.path.exists(target_file):
      os.remove(target_file)
  return False
# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
  """Creates an entire path, if it doesn't already exist.

  Args:
    *path: Path components, joined with os.path.join.

  Returns:
    True if the directory exists (created or pre-existing), False on error.
  """
  target = os.path.join(*path)
  try:
    os.makedirs(target)
  except OSError as e:
    # An already-existing directory is fine; any other OS error is a failure.
    if e.errno != errno.EEXIST:
      return False
  return True
# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def ExtractZip(filename, output_dir, verbose=True):
  """ Extract the zip archive in the output directory.

  Args:
    filename: Path of the zip archive to extract.
    output_dir: Directory to extract into (created if necessary).
    verbose: If True, print each member name while extracting with the
        Python zipfile fallback.

  Raises:
    IOError: The external unzip tool returned a non-zero exit code.
  """
  MaybeMakeDirectory(output_dir)

  # On Linux and Mac, we use the unzip command as it will
  # handle links and file bits (executable), which is much
  # easier then trying to do that with ZipInfo options.
  #
  # The Mac Version of unzip unfortunately does not support Zip64, whereas
  # the python module does, so we have to fallback to the python zip module
  # on Mac if the filesize is greater than 4GB.
  #
  # On Windows, try to use 7z if it is installed, otherwise fall back to python
  # zip module and pray we don't have files larger than 512MB to unzip.
  unzip_cmd = None
  if ((IsMac() and os.path.getsize(filename) < 4 * 1024 * 1024 * 1024)
      or IsLinux()):
    unzip_cmd = ['unzip', '-o']
  elif IsWindows() and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe'):
    unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']

  if unzip_cmd:
    # Make sure path is absolute before changing directories.
    filepath = os.path.abspath(filename)
    saved_dir = os.getcwd()
    os.chdir(output_dir)
    command = unzip_cmd + [filepath]
    result = RunProcess(command)
    os.chdir(saved_dir)
    if result:
      raise IOError('unzip failed: %s => %s' % (str(command), result))
  else:
    # Fallback: extract with Python's zipfile module.
    assert IsWindows() or IsMac()
    zf = zipfile.ZipFile(filename)
    for name in zf.namelist():
      if verbose:
        print 'Extracting %s' % name
      zf.extract(name, output_dir)
      if IsMac():
        # Restore permission bits.
        os.chmod(os.path.join(output_dir, name),
                 zf.getinfo(name).external_attr >> 16L)
def RunProcess(command):
  """Runs an arbitrary command.

  If output from the call is needed, use RunProcessAndRetrieveOutput instead.

  Args:
    command: A list containing the command and args to execute.

  Returns:
    The return code of the call.
  """
  # On Windows, use shell=True to get PATH interpretation.
  return subprocess.call(command, shell=IsWindows())
def RunProcessAndRetrieveOutput(command, cwd=None):
  """Runs an arbitrary command, returning its output and return code.

  Since output is collected via communicate(), there will be no output until
  the call terminates. If you need output while the program runs (ie. so
  that the buildbot doesn't terminate the script), consider RunProcess().

  Args:
    command: A list containing the command and args to execute.
    cwd: A directory to change to while running the command. The command can be
        relative to this directory. If this is None, the command will be run in
        the current directory.

  Returns:
    A tuple of the output and return code.
  """
  if cwd:
    original_cwd = os.getcwd()
    os.chdir(cwd)

  try:
    # On Windows, use shell=True to get PATH interpretation.
    shell = IsWindows()
    proc = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE)
    (output, _) = proc.communicate()
  finally:
    # Restore the working directory even if Popen/communicate raises, so a
    # failed command can't leave every subsequent command running in |cwd|.
    if cwd:
      os.chdir(original_cwd)

  return (output, proc.returncode)
def RunGit(command, cwd=None):
  """Run a git subcommand, returning its output and return code.

  Args:
    command: A list containing the args to git.
    cwd: A directory to change to while running the git command (optional).

  Returns:
    A tuple of the output and return code.
  """
  return RunProcessAndRetrieveOutput(['git'] + command, cwd=cwd)
def CheckRunGit(command, cwd=None):
  """Run a git subcommand, asserting that the call succeeds.

  Args:
    command: A list containing the args to git.

  Returns:
    The output of the git command.
  """
  (output, return_code) = RunGit(command, cwd=cwd)
  assert not return_code, (
      'An error occurred while running "git %s"' % ' '.join(command))
  return output
def SetBuildSystemDefault(build_system):
  """Sets up any environment variables needed to build with the specified build
  system.

  Args:
    build_system: A string specifying build system. Currently only 'ninja' or
        'make' are supported.

  Raises:
    RuntimeError: An unsupported build system was requested.
  """
  if build_system == 'ninja':
    gyp_var = os.getenv('GYP_GENERATORS')
    # Append ninja to the generator list if it isn't already there.
    if not gyp_var or 'ninja' not in gyp_var:
      os.environ['GYP_GENERATORS'] = gyp_var + ',ninja' if gyp_var else 'ninja'

    if IsWindows():
      os.environ['GYP_DEFINES'] = ('component=shared_library '
          'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '
          'chromium_win_pch=0')
  elif build_system == 'make':
    os.environ['GYP_GENERATORS'] = 'make'
  else:
    raise RuntimeError('%s build not supported.' % build_system)
def BuildWithMake(threads, targets, build_type='Release'):
  """Runs a make build of |targets|; returns True on success."""
  cmd = ['make', 'BUILDTYPE=%s' % build_type]
  if threads:
    cmd.append('-j%d' % threads)
  return RunProcess(cmd + targets) == 0
def BuildWithNinja(threads, targets, build_type='Release'):
  """Runs a ninja build of |targets|; returns True on success."""
  cmd = ['ninja', '-C', os.path.join('out', build_type)]
  if threads:
    cmd.append('-j%d' % threads)
  return RunProcess(cmd + targets) == 0
def BuildWithVisualStudio(targets, build_type='Release'):
  """Builds |targets| with devenv.com; returns True on success."""
  path_to_devenv = os.path.abspath(
      os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
  path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')

  cmd = [path_to_devenv, '/build', build_type, path_to_sln]
  for target in targets:
    cmd.extend(['/Project', target])

  return not RunProcess(cmd)
def WriteStringToFile(text, file_name):
  """Writes |text| to |file_name| in binary mode.

  Args:
    text: The string to write.
    file_name: Destination file path.

  Raises:
    RuntimeError: The file could not be written.
  """
  try:
    with open(file_name, "wb") as f:
      f.write(text)
  except IOError as e:
    # Include the underlying OS error so the cause isn't silently discarded.
    raise RuntimeError('Error writing to file [%s]: %s' % (file_name, e))
def ReadStringFromFile(file_name):
  """Returns the entire contents of |file_name|.

  Args:
    file_name: Path of the file to read.

  Raises:
    RuntimeError: The file could not be read.
  """
  try:
    with open(file_name) as f:
      return f.read()
  except IOError as e:
    # Include the underlying OS error so the cause isn't silently discarded.
    raise RuntimeError('Error reading file [%s]: %s' % (file_name, e))
def ChangeBackslashToSlashInPatch(diff_text):
  """Formats file paths in the given patch text to unix-style paths.

  Only the '--- ' and '+++ ' header lines are rewritten; patch content lines
  are left untouched.

  Args:
    diff_text: The patch text, or a falsy value.

  Returns:
    The rewritten patch text, or None if |diff_text| is empty/None.
  """
  if not diff_text:
    return None

  def _FixLine(line):
    if line.startswith(('--- ', '+++ ')):
      return line.replace('\\', '/')
    return line

  return '\n'.join(_FixLine(line) for line in diff_text.split('\n'))
class Builder(object):
  """Builder is used by the bisect script to build relevant targets and deploy.
  """
  def __init__(self, opts):
    """Performs setup for building with target build system.

    Args:
      opts: Options parsed from command line.

    Raises:
      RuntimeError: The build environment could not be configured.
    """
    if IsWindows():
      if not opts.build_preference:
        opts.build_preference = 'msvs'

      if opts.build_preference == 'msvs':
        if not os.getenv('VS100COMNTOOLS'):
          raise RuntimeError(
              'Path to visual studio could not be determined.')
      else:
        SetBuildSystemDefault(opts.build_preference)
    else:
      if not opts.build_preference:
        # Default to '' so an unset GYP_GENERATORS falls through to 'make'
        # instead of raising TypeError from `'ninja' in None`.
        if 'ninja' in os.getenv('GYP_GENERATORS', ''):
          opts.build_preference = 'ninja'
        else:
          opts.build_preference = 'make'

      SetBuildSystemDefault(opts.build_preference)

    if not bisect_utils.SetupPlatformBuildEnvironment(opts):
      raise RuntimeError('Failed to set platform environment.')

  @staticmethod
  def FromOpts(opts):
    """Constructs and returns the Builder subclass for the target platform."""
    builder = None
    if opts.target_platform == 'cros':
      builder = CrosBuilder(opts)
    elif opts.target_platform == 'android':
      builder = AndroidBuilder(opts)
    elif opts.target_platform == 'android-chrome':
      builder = AndroidChromeBuilder(opts)
    else:
      builder = DesktopBuilder(opts)
    return builder

  def Build(self, depot, opts):
    # Subclasses must implement the platform-specific build.
    raise NotImplementedError()

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    # Subclasses must report where build artifacts land.
    raise NotImplementedError()
class DesktopBuilder(Builder):
  """DesktopBuilder is used to build Chromium on linux/mac/windows."""
  def __init__(self, opts):
    super(DesktopBuilder, self).__init__(opts)

  def Build(self, depot, opts):
    """Builds chromium_builder_perf target using options passed into
    the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    targets = ['chromium_builder_perf']
    # Goma distributes compilation, so a high parallelism value pays off.
    threads = 64 if opts.use_goma else None

    build_success = False
    preference = opts.build_preference
    if preference == 'make':
      build_success = BuildWithMake(threads, targets, opts.target_build_type)
    elif preference == 'ninja':
      build_success = BuildWithNinja(threads, targets, opts.target_build_type)
    elif preference == 'msvs':
      assert IsWindows(), 'msvs is only supported on Windows.'
      build_success = BuildWithVisualStudio(targets, opts.target_build_type)
    else:
      assert False, 'No build system defined.'
    return build_success

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    """Returns the path to the build directory, relative to the checkout root.

    Assumes that the current working directory is the checkout root.
    """
    src_dir = src_dir or 'src'
    if opts.build_preference == 'ninja' or IsLinux():
      return os.path.join(src_dir, 'out')
    if IsMac():
      return os.path.join(src_dir, 'xcodebuild')
    if IsWindows():
      return os.path.join(src_dir, 'build')
    raise NotImplementedError('Unexpected platform %s' % sys.platform)
class AndroidBuilder(Builder):
  """AndroidBuilder is used to build on android."""
  def __init__(self, opts):
    super(AndroidBuilder, self).__init__(opts)

  def _GetTargets(self):
    """Returns the list of ninja targets built for Android bisects."""
    return ['chrome_shell_apk', 'cc_perftests_apk', 'android_tools']

  def Build(self, depot, opts):
    """Builds the android content shell and other necessary tools using options
    passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    # Goma distributes compilation, so a high parallelism value pays off.
    threads = 64 if opts.use_goma else None

    build_success = False
    if opts.build_preference == 'ninja':
      build_success = BuildWithNinja(
          threads, self._GetTargets(), opts.target_build_type)
    else:
      assert False, 'No build system defined.'

    return build_success
class AndroidChromeBuilder(AndroidBuilder):
  """AndroidChromeBuilder is used to build "android-chrome"."""
  def __init__(self, opts):
    super(AndroidChromeBuilder, self).__init__(opts)

  def _GetTargets(self):
    """Adds chrome_apk to the standard Android target list."""
    return super(AndroidChromeBuilder, self)._GetTargets() + ['chrome_apk']
class CrosBuilder(Builder):
  """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
  target platform."""
  def __init__(self, opts):
    super(CrosBuilder, self).__init__(opts)

  def ImageToTarget(self, opts):
    """Installs latest image to target specified by opts.cros_remote_ip.

    Args:
      opts: Program options containing cros_board and cros_remote_ip.

    Returns:
      True if successful.
    """
    try:
      # Keys will most likely be set to 0640 after wiping the chroot.
      os.chmod(CROS_SCRIPT_KEY_PATH, 0600)
      os.chmod(CROS_TEST_KEY_PATH, 0600)
      cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
             '--remote=%s' % opts.cros_remote_ip,
             '--board=%s' % opts.cros_board, '--test', '--verbose']

      return_code = RunProcess(cmd)
      return not return_code
    except OSError, e:
      # Any OS-level failure (e.g. missing key files) counts as a failed
      # flash; 'e' is intentionally unused.
      return False

  def BuildPackages(self, opts, depot):
    """Builds packages for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    # When bisecting a non-cros depot, point the chroot at the local
    # Chrome checkout instead of the upstream source.
    if depot != 'cros':
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=%s' % opts.target_build_type, './build_packages',
        '--board=%s' % opts.cros_board]
    return_code = RunProcess(cmd)

    return not return_code

  def BuildImage(self, opts, depot):
    """Builds test image for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    if depot != 'cros':
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    # NOTE(review): this command carries an extra '--' that BuildPackages
    # does not — confirm whether it is intentional for ./build_image.
    cmd += ['BUILDTYPE=%s' % opts.target_build_type, '--', './build_image',
        '--board=%s' % opts.cros_board, 'test']

    return_code = RunProcess(cmd)

    return not return_code

  def Build(self, depot, opts):
    """Builds targets using options passed into the script.

    Runs the full pipeline: build packages, then the test image, then
    flash the image to the remote target.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    if self.BuildPackages(opts, depot):
      if self.BuildImage(opts, depot):
        return self.ImageToTarget(opts)
    return False
class SourceControl(object):
  """SourceControl is an abstraction over the underlying source control
  system used for chromium. For now only git is supported, but in the
  future, the svn workflow could be added as well."""
  def __init__(self):
    super(SourceControl, self).__init__()

  def SyncToRevisionWithGClient(self, revision):
    """Uses gclient to sync to the specified revision.

    ie. gclient sync --revision <revision>

    Args:
      revision: The git SHA1 or svn CL (depending on workflow).

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunGClient(['sync', '--verbose', '--reset', '--force',
        '--delete_unversioned_trees', '--nohooks', '--revision', revision])

  def SyncToRevisionWithRepo(self, timestamp):
    """Uses repo to sync all the underlying git depots to the specified
    time.

    Args:
      timestamp: The unix timestamp to sync to.

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunRepoSyncAtTimestamp(timestamp)
1009 class GitSourceControl(SourceControl):
1010 """GitSourceControl is used to query the underlying source control. """
  def __init__(self, opts):
    # Keep the parsed command-line options for later queries.
    super(GitSourceControl, self).__init__()
    self.opts = opts

  def IsGit(self):
    """Returns True: this SourceControl implementation is git-based."""
    return True
1018 def GetRevisionList(self, revision_range_end, revision_range_start, cwd=None):
1019 """Retrieves a list of revisions between |revision_range_start| and
1020 |revision_range_end|.
1022 Args:
1023 revision_range_end: The SHA1 for the end of the range.
1024 revision_range_start: The SHA1 for the beginning of the range.
1026 Returns:
1027 A list of the revisions between |revision_range_start| and
1028 |revision_range_end| (inclusive).
1030 revision_range = '%s..%s' % (revision_range_start, revision_range_end)
1031 cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
1032 log_output = CheckRunGit(cmd, cwd=cwd)
1034 revision_hash_list = log_output.split()
1035 revision_hash_list.append(revision_range_start)
1037 return revision_hash_list
1039 def SyncToRevision(self, revision, sync_client=None):
1040 """Syncs to the specified revision.
1042 Args:
1043 revision: The revision to sync to.
1044 use_gclient: Specifies whether or not we should sync using gclient or
1045 just use source control directly.
1047 Returns:
1048 True if successful.
1051 if not sync_client:
1052 results = RunGit(['checkout', revision])[1]
1053 elif sync_client == 'gclient':
1054 results = self.SyncToRevisionWithGClient(revision)
1055 elif sync_client == 'repo':
1056 results = self.SyncToRevisionWithRepo(revision)
1058 return not results
1060 def ResolveToRevision(self, revision_to_check, depot, search, cwd=None):
1061 """If an SVN revision is supplied, try to resolve it to a git SHA1.
1063 Args:
1064 revision_to_check: The user supplied revision string that may need to be
1065 resolved to a git SHA1.
1066 depot: The depot the revision_to_check is from.
1067 search: The number of changelists to try if the first fails to resolve
1068 to a git hash. If the value is negative, the function will search
1069 backwards chronologically, otherwise it will search forward.
1071 Returns:
1072 A string containing a git SHA1 hash, otherwise None.
1074 # Android-chrome is git only, so no need to resolve this to anything else.
1075 if depot == 'android-chrome':
1076 return revision_to_check
1078 if depot != 'cros':
1079 if not IsStringInt(revision_to_check):
1080 return revision_to_check
1082 depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'
1084 if depot != 'chromium':
1085 depot_svn = DEPOT_DEPS_NAME[depot]['svn']
1087 svn_revision = int(revision_to_check)
1088 git_revision = None
1090 if search > 0:
1091 search_range = xrange(svn_revision, svn_revision + search, 1)
1092 else:
1093 search_range = xrange(svn_revision, svn_revision + search, -1)
1095 for i in search_range:
1096 svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
1097 cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
1098 'origin/master']
1100 (log_output, return_code) = RunGit(cmd, cwd=cwd)
1102 assert not return_code, 'An error occurred while running'\
1103 ' "git %s"' % ' '.join(cmd)
1105 if not return_code:
1106 log_output = log_output.strip()
1108 if log_output:
1109 git_revision = log_output
1111 break
1113 return git_revision
1114 else:
1115 if IsStringInt(revision_to_check):
1116 return int(revision_to_check)
1117 else:
1118 cwd = os.getcwd()
1119 os.chdir(os.path.join(os.getcwd(), 'src', 'third_party',
1120 'chromiumos-overlay'))
1121 pattern = CROS_VERSION_PATTERN % revision_to_check
1122 cmd = ['log', '--format=%ct', '-1', '--grep', pattern]
1124 git_revision = None
1126 log_output = CheckRunGit(cmd, cwd=cwd)
1127 if log_output:
1128 git_revision = log_output
1129 git_revision = int(log_output.strip())
1130 os.chdir(cwd)
1132 return git_revision
1134 def IsInProperBranch(self):
1135 """Confirms they're in the master branch for performing the bisection.
1136 This is needed or gclient will fail to sync properly.
1138 Returns:
1139 True if the current branch on src is 'master'
1141 cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
1142 log_output = CheckRunGit(cmd)
1143 log_output = log_output.strip()
1145 return log_output == "master"
1147 def SVNFindRev(self, revision, cwd=None):
1148 """Maps directly to the 'git svn find-rev' command.
1150 Args:
1151 revision: The git SHA1 to use.
1153 Returns:
1154 An integer changelist #, otherwise None.
1157 cmd = ['svn', 'find-rev', revision]
1159 output = CheckRunGit(cmd, cwd)
1160 svn_revision = output.strip()
1162 if IsStringInt(svn_revision):
1163 return int(svn_revision)
1165 return None
1167 def QueryRevisionInfo(self, revision, cwd=None):
1168 """Gathers information on a particular revision, such as author's name,
1169 email, subject, and date.
1171 Args:
1172 revision: Revision you want to gather information on.
1173 Returns:
1174 A dict in the following format:
1176 'author': %s,
1177 'email': %s,
1178 'date': %s,
1179 'subject': %s,
1180 'body': %s,
1183 commit_info = {}
1185 formats = ['%cN', '%cE', '%s', '%cD', '%b']
1186 targets = ['author', 'email', 'subject', 'date', 'body']
1188 for i in xrange(len(formats)):
1189 cmd = ['log', '--format=%s' % formats[i], '-1', revision]
1190 output = CheckRunGit(cmd, cwd=cwd)
1191 commit_info[targets[i]] = output.rstrip()
1193 return commit_info
1195 def CheckoutFileAtRevision(self, file_name, revision, cwd=None):
1196 """Performs a checkout on a file at the given revision.
1198 Returns:
1199 True if successful.
1201 return not RunGit(['checkout', revision, file_name], cwd=cwd)[1]
1203 def RevertFileToHead(self, file_name):
1204 """Unstages a file and returns it to HEAD.
1206 Returns:
1207 True if successful.
1209 # Reset doesn't seem to return 0 on success.
1210 RunGit(['reset', 'HEAD', file_name])
1212 return not RunGit(['checkout', bisect_utils.FILE_DEPS_GIT])[1]
1214 def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
1215 """Returns a list of commits that modified this file.
1217 Args:
1218 filename: Name of file.
1219 revision_start: Start of revision range.
1220 revision_end: End of revision range.
1222 Returns:
1223 Returns a list of commits that touched this file.
1225 cmd = ['log', '--format=%H', '%s~1..%s' % (revision_start, revision_end),
1226 filename]
1227 output = CheckRunGit(cmd)
1229 return [o for o in output.split('\n') if o]
class BisectPerformanceMetrics(object):
  """This class contains functionality to perform a bisection of a range of
  revisions to narrow down where performance regressions may have occurred.

  The main entry-point is the Run method.
  """
  def __init__(self, source_control, opts):
    """Initializes the bisector.

    Args:
      source_control: A SourceControl instance used to query/sync revisions.
      opts: Parsed command-line options.
    """
    super(BisectPerformanceMetrics, self).__init__()

    self.opts = opts
    self.source_control = source_control
    # Script is expected to be launched from the 'src' directory.
    self.src_cwd = os.getcwd()
    self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
    # Maps depot name -> absolute working directory for that depot.
    self.depot_cwd = {}
    # Deferred cleanup commands executed by PerformCleanup().
    self.cleanup_commands = []
    # Accumulated non-fatal warning strings surfaced at the end of the run.
    self.warnings = []
    self.builder = Builder.FromOpts(opts)

    # This always starts true since the script grabs latest first.
    self.was_blink = True

    for d in DEPOT_NAMES:
      # The working directory of each depot is just the path to the depot, but
      # since we're already in 'src', we can skip that part.
      self.depot_cwd[d] = os.path.join(
          self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
1261 def PerformCleanup(self):
1262 """Performs cleanup when script is finished."""
1263 os.chdir(self.src_cwd)
1264 for c in self.cleanup_commands:
1265 if c[0] == 'mv':
1266 shutil.move(c[1], c[2])
1267 else:
1268 assert False, 'Invalid cleanup command.'
  def GetRevisionList(self, depot, bad_revision, good_revision):
    """Retrieves a list of all the commits between the bad revision and
    last known good revision.

    Args:
      depot: The depot being bisected ('cros' is handled specially).
      bad_revision: End of the revision range.
      good_revision: Start of the revision range.

    Returns:
      For 'cros': a descending list of unique commit timestamps (ints).
      Otherwise: the source control's revision list for the range.
    """

    revision_work_list = []

    if depot == 'cros':
      revision_range_start = good_revision
      revision_range_end = bad_revision

      cwd = os.getcwd()
      self.ChangeToDepotWorkingDirectory('cros')

      # Print the commit timestamps for every commit in the revision time
      # range. We'll sort them and bisect by that. There is a remote chance that
      # 2 (or more) commits will share the exact same timestamp, but it's
      # probably safe to ignore that case.
      cmd = ['repo', 'forall', '-c',
          'git log --format=%%ct --before=%d --after=%d' % (
          revision_range_end, revision_range_start)]
      (output, return_code) = RunProcessAndRetrieveOutput(cmd)

      assert not return_code, 'An error occurred while running'\
                              ' "%s"' % ' '.join(cmd)

      os.chdir(cwd)

      # De-duplicate the timestamps (multiple repos can share commit times).
      revision_work_list = list(set(
          [int(o) for o in output.split('\n') if IsStringInt(o)]))
      revision_work_list = sorted(revision_work_list, reverse=True)
    else:
      cwd = self._GetDepotDirectory(depot)
      revision_work_list = self.source_control.GetRevisionList(bad_revision,
          good_revision, cwd=cwd)

    return revision_work_list
  def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
    """Maps a V8 trunk revision to its bleeding_edge counterpart, if possible.

    Args:
      revision: A git hash on the V8 trunk branch.

    Returns:
      A git hash in v8_bleeding_edge if the mapping succeeds, otherwise None.
    """
    svn_revision = self.source_control.SVNFindRev(revision)

    if IsStringInt(svn_revision):
      # V8 is tricky to bisect, in that there are only a few instances when
      # we can dive into bleeding_edge and get back a meaningful result.
      # Try to detect a V8 "business as usual" case, which is when:
      #  1. trunk revision N has description "Version X.Y.Z"
      #  2. bleeding_edge revision (N-1) has description "Prepare push to
      #     trunk. Now working on X.Y.(Z+1)."
      #
      # As of 01/24/2014, V8 trunk descriptions are formatted:
      # "Version 3.X.Y (based on bleeding_edge revision rZ)"
      # So we can just try parsing that out first and fall back to the old way.
      v8_dir = self._GetDepotDirectory('v8')
      v8_bleeding_edge_dir = self._GetDepotDirectory('v8_bleeding_edge')

      revision_info = self.source_control.QueryRevisionInfo(revision,
          cwd=v8_dir)

      version_re = re.compile("Version (?P<values>[0-9,.]+)")

      regex_results = version_re.search(revision_info['subject'])

      if regex_results:
        git_revision = None

        # Look for "based on bleeding_edge" and parse out revision
        if 'based on bleeding_edge' in revision_info['subject']:
          try:
            bleeding_edge_revision = revision_info['subject'].split(
                'bleeding_edge revision r')[1]
            bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
            git_revision = self.source_control.ResolveToRevision(
                bleeding_edge_revision, 'v8_bleeding_edge', 1,
                cwd=v8_bleeding_edge_dir)
            return git_revision
          except (IndexError, ValueError):
            # Subject didn't have the expected shape; fall through to the
            # legacy "Prepare push to trunk" heuristic below.
            pass

        if not git_revision:
          # Wasn't successful, try the old way of looking for "Prepare push to"
          git_revision = self.source_control.ResolveToRevision(
              int(svn_revision) - 1, 'v8_bleeding_edge', -1,
              cwd=v8_bleeding_edge_dir)

          if git_revision:
            revision_info = self.source_control.QueryRevisionInfo(git_revision,
                cwd=v8_bleeding_edge_dir)

            if 'Prepare push to trunk' in revision_info['subject']:
              return git_revision
    return None
1361 def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
1362 cwd = self._GetDepotDirectory('v8')
1363 cmd = ['log', '--format=%ct', '-1', revision]
1364 output = CheckRunGit(cmd, cwd=cwd)
1365 commit_time = int(output)
1366 commits = []
1368 if search_forward:
1369 cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
1370 'origin/master']
1371 output = CheckRunGit(cmd, cwd=cwd)
1372 output = output.split()
1373 commits = output
1374 commits = reversed(commits)
1375 else:
1376 cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
1377 'origin/master']
1378 output = CheckRunGit(cmd, cwd=cwd)
1379 output = output.split()
1380 commits = output
1382 bleeding_edge_revision = None
1384 for c in commits:
1385 bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)
1386 if bleeding_edge_revision:
1387 break
1389 return bleeding_edge_revision
1391 def _ParseRevisionsFromDEPSFileManually(self, deps_file_contents):
1392 """Manually parses the vars section of the DEPS file to determine
1393 chromium/blink/etc... revisions.
1395 Returns:
1396 A dict in the format {depot:revision} if successful, otherwise None.
1398 # We'll parse the "vars" section of the DEPS file.
1399 rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE)
1400 re_results = rxp.search(deps_file_contents)
1401 locals = {}
1403 if not re_results:
1404 return None
1406 # We should be left with a series of entries in the vars component of
1407 # the DEPS file with the following format:
1408 # 'depot_name': 'revision',
1409 vars_body = re_results.group('vars_body')
1410 rxp = re.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
1411 re.MULTILINE)
1412 re_results = rxp.findall(vars_body)
1414 return dict(re_results)
1416 def _ParseRevisionsFromDEPSFile(self, depot):
1417 """Parses the local DEPS file to determine blink/skia/v8 revisions which may
1418 be needed if the bisect recurses into those depots later.
1420 Args:
1421 depot: Depot being bisected.
1423 Returns:
1424 A dict in the format {depot:revision} if successful, otherwise None.
1426 try:
1427 deps_data = {'Var': lambda _: deps_data["vars"][_],
1428 'From': lambda *args: None
1430 execfile(bisect_utils.FILE_DEPS_GIT, {}, deps_data)
1431 deps_data = deps_data['deps']
1433 rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
1434 results = {}
1435 for depot_name, depot_data in DEPOT_DEPS_NAME.iteritems():
1436 if (depot_data.get('platform') and
1437 depot_data.get('platform') != os.name):
1438 continue
1440 if (depot_data.get('recurse') and depot in depot_data.get('from')):
1441 depot_data_src = depot_data.get('src') or depot_data.get('src_old')
1442 src_dir = deps_data.get(depot_data_src)
1443 if src_dir:
1444 self.depot_cwd[depot_name] = os.path.join(self.src_cwd,
1445 depot_data_src[4:])
1446 re_results = rxp.search(src_dir)
1447 if re_results:
1448 results[depot_name] = re_results.group('revision')
1449 else:
1450 warning_text = ('Couldn\'t parse revision for %s while bisecting '
1451 '%s' % (depot_name, depot))
1452 if not warning_text in self.warnings:
1453 self.warnings.append(warning_text)
1454 else:
1455 results[depot_name] = None
1456 return results
1457 except ImportError:
1458 deps_file_contents = ReadStringFromFile(bisect_utils.FILE_DEPS_GIT)
1459 parse_results = self._ParseRevisionsFromDEPSFileManually(
1460 deps_file_contents)
1461 results = {}
1462 for depot_name, depot_revision in parse_results.iteritems():
1463 depot_revision = depot_revision.strip('@')
1464 print depot_name, depot_revision
1465 for current_name, current_data in DEPOT_DEPS_NAME.iteritems():
1466 if (current_data.has_key('deps_var') and
1467 current_data['deps_var'] == depot_name):
1468 src_name = current_name
1469 results[src_name] = depot_revision
1470 break
1471 return results
  def Get3rdPartyRevisionsFromCurrentRevision(self, depot, revision):
    """Parses the DEPS file to determine WebKit/v8/etc... versions.

    Args:
      depot: The depot currently being bisected.
      revision: The revision synced to (unused for 'chromium'/'cros' paths).

    Returns:
      A dict in the format {depot:revision} if successful, otherwise None.
    """
    cwd = os.getcwd()
    self.ChangeToDepotWorkingDirectory(depot)

    results = {}

    if depot == 'chromium' or depot == 'android-chrome':
      results = self._ParseRevisionsFromDEPSFile(depot)
      os.chdir(cwd)
    elif depot == 'cros':
      cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board,
             'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild',
             CROS_CHROMEOS_PATTERN]
      (output, return_code) = RunProcessAndRetrieveOutput(cmd)

      assert not return_code, 'An error occurred while running' \
                              ' "%s"' % ' '.join(cmd)

      # NOTE(review): this compares an int to a string pattern — in Python 2
      # 'int > str' is always False, so the prefix is never stripped. Likely
      # meant len(CROS_CHROMEOS_PATTERN); confirm against upstream history.
      if len(output) > CROS_CHROMEOS_PATTERN:
        output = output[len(CROS_CHROMEOS_PATTERN):]

      if len(output) > 1:
        output = output.split('_')[0]

        if len(output) > 3:
          contents = output.split('.')

          version = contents[2]

          if contents[3] != '0':
            warningText = 'Chrome version: %s.%s but using %s.0 to bisect.' % \
                (version, contents[3], version)
            if not warningText in self.warnings:
              self.warnings.append(warningText)

        cwd = os.getcwd()
        self.ChangeToDepotWorkingDirectory('chromium')
        # NOTE(review): the git hash looked up here is bound to 'return_code'
        # and never used; 'results['chromium']' below gets the ebuild-derived
        # version string instead — confirm which value is intended.
        return_code = CheckRunGit(['log', '-1', '--format=%H',
            '--author=chrome-release@google.com', '--grep=to %s' % version,
            'origin/master'])
        os.chdir(cwd)

        results['chromium'] = output.strip()
    elif depot == 'v8':
      # We can't try to map the trunk revision to bleeding edge yet, because
      # we don't know which direction to try to search in. Have to wait until
      # the bisect has narrowed the results down to 2 v8 rolls.
      results['v8_bleeding_edge'] = None

    return results
  def BackupOrRestoreOutputdirectory(self, restore=False, build_type='Release'):
    """Backs up or restores build output directory based on restore argument.

    Args:
      restore: Indicates whether to restore or backup. Default is False(Backup)
      build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)

    Returns:
      Path to backup or restored location as string. otherwise None if it fails.
    """
    build_dir = os.path.abspath(
        self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
    source_dir = os.path.join(build_dir, build_type)
    destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
    if restore:
      # Restoring is simply the backup move with the endpoints swapped.
      source_dir, destination_dir = destination_dir, source_dir
    if os.path.exists(source_dir):
      # Clear any stale destination before moving (move would nest otherwise).
      RmTreeAndMkDir(destination_dir, skip_makedir=True)
      shutil.move(source_dir, destination_dir)
      return destination_dir
    return None
1551 def DownloadCurrentBuild(self, revision, build_type='Release', patch=None):
1552 """Downloads the build archive for the given revision.
1554 Args:
1555 revision: The SVN revision to build.
1556 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1558 Returns:
1559 True if download succeeds, otherwise False.
1561 patch_sha = None
1562 if patch:
1563 # Get the SHA of the DEPS changes patch.
1564 patch_sha = GetSHA1HexDigest(patch)
1566 # Update the DEPS changes patch with a patch to create a new file named
1567 # 'DEPS.sha' and add patch_sha evaluated above to it.
1568 patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})
1570 # Source archive file path on cloud storage.
1571 source_file = GetRemoteBuildPath(revision, self.opts.target_arch, patch_sha)
1573 # Get Build output directory
1574 abs_build_dir = os.path.abspath(
1575 self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1576 # Downloaded archive file path.
1577 downloaded_file = os.path.join(
1578 abs_build_dir,
1579 GetZipFileName(revision, self.opts.target_arch, patch_sha))
1581 fetch_build_func = lambda: FetchFromCloudStorage(self.opts.gs_bucket,
1582 source_file,
1583 abs_build_dir)
1585 if not fetch_build_func():
1586 if not self.PostBuildRequestAndWait(revision,
1587 fetch_build=fetch_build_func,
1588 patch=patch):
1589 raise RuntimeError('Somewthing went wrong while processing build'
1590 'request for: %s' % revision)
1591 # Generic name for the archive, created when archive file is extracted.
1592 output_dir = os.path.join(
1593 abs_build_dir, GetZipFileName(target_arch=self.opts.target_arch))
1594 # Unzip build archive directory.
1595 try:
1596 RmTreeAndMkDir(output_dir, skip_makedir=True)
1597 ExtractZip(downloaded_file, abs_build_dir)
1598 if os.path.exists(output_dir):
1599 self.BackupOrRestoreOutputdirectory(restore=False)
1600 # Build output directory based on target(e.g. out/Release, out/Debug).
1601 target_build_output_dir = os.path.join(abs_build_dir, build_type)
1602 print 'Moving build from %s to %s' % (
1603 output_dir, target_build_output_dir)
1604 shutil.move(output_dir, target_build_output_dir)
1605 return True
1606 raise IOError('Missing extracted folder %s ' % output_dir)
1607 except Exception as e:
1608 print 'Somewthing went wrong while extracting archive file: %s' % e
1609 self.BackupOrRestoreOutputdirectory(restore=True)
1610 # Cleanup any leftovers from unzipping.
1611 if os.path.exists(output_dir):
1612 RmTreeAndMkDir(output_dir, skip_makedir=True)
1613 finally:
1614 # Delete downloaded archive
1615 if os.path.exists(downloaded_file):
1616 os.remove(downloaded_file)
1617 return False
  def WaitUntilBuildIsReady(self, fetch_build, bot_name, builder_host,
                            builder_port, build_request_id, max_timeout):
    """Waits until build is produced by bisect builder on tryserver.

    Args:
      fetch_build: Function to check and download build from cloud storage.
      bot_name: Builder bot name on tryserver.
      builder_host: Tryserver hostname.
      builder_port: Tryserver port.
      build_request_id: A unique ID of the build request posted to tryserver.
      max_timeout: Maximum time to wait for the build.

    Returns:
      A (result, message) tuple: (fetch result, 'Build successfully found') on
      success, or (False, <reason>) if the build failed or the wait timed out.
    """
    # Build number on the tryserver.
    build_num = None
    # Interval to check build on cloud storage.
    poll_interval = 60
    # Interval to check build status on tryserver.
    status_check_interval = 600
    last_status_check = time.time()
    start_time = time.time()
    while True:
      # Checks for build on gs://chrome-perf and download if exists.
      res = fetch_build()
      if res:
        return (res, 'Build successfully found')
      elapsed_status_check = time.time() - last_status_check
      # To avoid overloading tryserver with status check requests, we check
      # build status for every 10 mins.
      if elapsed_status_check > status_check_interval:
        last_status_check = time.time()
        if not build_num:
          # Get the build number on tryserver for the current build.
          build_num = bisect_builder.GetBuildNumFromBuilder(
              build_request_id, bot_name, builder_host, builder_port)
        # Check the status of build using the build number.
        # Note: Build is treated as PENDING if build number is not found
        # on the the tryserver.
        build_status, status_link = bisect_builder.GetBuildStatus(
            build_num, bot_name, builder_host, builder_port)
        if build_status == bisect_builder.FAILED:
          return (False, 'Failed to produce build, log: %s' % status_link)
      elapsed_time = time.time() - start_time
      if elapsed_time > max_timeout:
        return (False, 'Timed out: %ss without build' % max_timeout)

      print 'Time elapsed: %ss without build.' % elapsed_time
      time.sleep(poll_interval)
1671 def PostBuildRequestAndWait(self, revision, fetch_build, patch=None):
1672 """POSTs the build request job to the tryserver instance."""
1674 def GetBuilderNameAndBuildTime(target_arch='ia32'):
1675 """Gets builder bot name and buildtime in seconds based on platform."""
1676 # Bot names should match the one listed in tryserver.chromium's
1677 # master.cfg which produces builds for bisect.
1678 if IsWindows():
1679 if Is64BitWindows() and target_arch == 'x64':
1680 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
1681 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
1682 if IsLinux():
1683 return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
1684 if IsMac():
1685 return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME)
1686 raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)
1687 if not fetch_build:
1688 return False
1690 bot_name, build_timeout = GetBuilderNameAndBuildTime(self.opts.target_arch)
1691 builder_host = self.opts.builder_host
1692 builder_port = self.opts.builder_port
1693 # Create a unique ID for each build request posted to tryserver builders.
1694 # This ID is added to "Reason" property in build's json.
1695 build_request_id = GetSHA1HexDigest(
1696 '%s-%s-%s' % (revision, patch, time.time()))
1698 # Creates a try job description.
1699 job_args = {'host': builder_host,
1700 'port': builder_port,
1701 'revision': 'src@%s' % revision,
1702 'bot': bot_name,
1703 'name': build_request_id
1705 # Update patch information if supplied.
1706 if patch:
1707 job_args['patch'] = patch
1708 # Posts job to build the revision on the server.
1709 if bisect_builder.PostTryJob(job_args):
1710 status, error_msg = self.WaitUntilBuildIsReady(fetch_build,
1711 bot_name,
1712 builder_host,
1713 builder_port,
1714 build_request_id,
1715 build_timeout)
1716 if not status:
1717 raise RuntimeError('%s [revision: %s]' % (error_msg, revision))
1718 return True
1719 return False
1721 def IsDownloadable(self, depot):
1722 """Checks if build is downloadable based on target platform and depot."""
1723 if self.opts.target_platform in ['chromium'] and self.opts.gs_bucket:
1724 return (depot == 'chromium' or
1725 'chromium' in DEPOT_DEPS_NAME[depot]['from'] or
1726 'v8' in DEPOT_DEPS_NAME[depot]['from'])
1727 return False
  def UpdateDeps(self, revision, depot, deps_file):
    """Updates DEPS file with new revision of dependency repository.

    This method search DEPS for a particular pattern in which depot revision
    is specified (e.g "webkit_revision": "123456"). If a match is found then
    it resolves the given git hash to SVN revision and replace it in DEPS file.

    Args:
      revision: A git hash revision of the dependency repository.
      depot: Current depot being bisected.
      deps_file: Path to DEPS file.

    Returns:
      True if DEPS file is modified successfully, otherwise False.
      NOTE(review): if the revision pattern isn't found, execution falls off
      the end of the try block and returns None (falsy) — confirm intended.
    """
    if not os.path.exists(deps_file):
      return False

    deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
    # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
    if not deps_var:
      print 'DEPS update not supported for Depot: %s', depot
      return False

    # Hack to Angle repository because, in DEPS file "vars" dictionary variable
    # contains "angle_revision" key that holds git hash instead of SVN revision.
    # And sometime "angle_revision" key is not specified in "vars" variable,
    # in such cases check "deps" dictionary variable that matches
    # angle.git@[a-fA-F0-9]{40}$ and replace git hash.
    if depot == 'angle':
      return self.UpdateDEPSForAngle(revision, depot, deps_file)

    try:
      deps_contents = ReadStringFromFile(deps_file)
      # Check whether the depot and revision pattern in DEPS file vars
      # e.g. for webkit the format is "webkit_revision": "12345".
      deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_var,
                                 re.MULTILINE)
      match = re.search(deps_revision, deps_contents)
      if match:
        svn_revision = self.source_control.SVNFindRev(
            revision, self._GetDepotDirectory(depot))
        if not svn_revision:
          print 'Could not determine SVN revision for %s' % revision
          return False
        # Update the revision information for the given depot
        new_data = re.sub(deps_revision, str(svn_revision), deps_contents)

        # For v8_bleeding_edge revisions change V8 branch in order
        # to fetch bleeding edge revision.
        if depot == 'v8_bleeding_edge':
          new_data = self.UpdateV8Branch(new_data)
          if not new_data:
            return False
        # Write changes to DEPS file
        WriteStringToFile(new_data, deps_file)
        return True
    except IOError, e:
      print 'Something went wrong while updating DEPS file. [%s]' % e
      return False
  def UpdateV8Branch(self, deps_content):
    """Updates V8 branch in DEPS file to process v8_bleeding_edge.

    Check for "v8_branch" in DEPS file if exists update its value
    with v8_bleeding_edge branch. Note: "v8_branch" is added to DEPS
    variable from DEPS revision 254916, therefore check for "src/v8":
    <v8 source path> in DEPS in order to support prior DEPS revisions
    and update it.

    Args:
      deps_content: DEPS file contents to be modified.

    Returns:
      Modified DEPS file contents as a string. If neither pattern matches,
      the content is returned unchanged.
    """
    new_branch = r'branches/bleeding_edge'
    v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")')
    if re.search(v8_branch_pattern, deps_content):
      deps_content = re.sub(v8_branch_pattern, new_branch, deps_content)
    else:
      # Replaces the branch assigned to "src/v8" key in DEPS file.
      # Format of "src/v8" in DEPS:
      # "src/v8":
      #   (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
      # So, "/trunk@" is replace with "/branches/bleeding_edge@"
      v8_src_pattern = re.compile(
          r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE)
      if re.search(v8_src_pattern, deps_content):
        deps_content = re.sub(v8_src_pattern, new_branch, deps_content)
    return deps_content
1821 def UpdateDEPSForAngle(self, revision, depot, deps_file):
1822 """Updates DEPS file with new revision for Angle repository.
1824 This is a hack for Angle depot case because, in DEPS file "vars" dictionary
1825 variable contains "angle_revision" key that holds git hash instead of
1826 SVN revision.
1828 And sometimes "angle_revision" key is not specified in "vars" variable,
1829 in such cases check "deps" dictionary variable that matches
1830 angle.git@[a-fA-F0-9]{40}$ and replace git hash.
1832 deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
1833 try:
1834 deps_contents = ReadStringFromFile(deps_file)
1835 # Check whether the depot and revision pattern in DEPS file vars variable
1836 # e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
1837 angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
1838 deps_var, re.MULTILINE)
1839 match = re.search(angle_rev_pattern % deps_var, deps_contents)
1840 if match:
1841 # Update the revision information for the given depot
1842 new_data = re.sub(angle_rev_pattern, revision, deps_contents)
1843 else:
1844 # Check whether the depot and revision pattern in DEPS file deps
1845 # variable. e.g.,
1846 # "src/third_party/angle": Var("chromium_git") +
1847 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
1848 angle_rev_pattern = re.compile(
1849 r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE)
1850 match = re.search(angle_rev_pattern, deps_contents)
1851 if not match:
1852 print 'Could not find angle revision information in DEPS file.'
1853 return False
1854 new_data = re.sub(angle_rev_pattern, revision, deps_contents)
1855 # Write changes to DEPS file
1856 WriteStringToFile(new_data, deps_file)
1857 return True
1858 except IOError, e:
1859 print 'Something went wrong while updating DEPS file, %s' % e
1860 return False
  def CreateDEPSPatch(self, depot, revision):
    """Modifies DEPS and returns diff as text.

    Args:
      depot: Current depot being bisected.
      revision: A git hash revision of the dependency repository.

    Returns:
      A tuple with git hash of chromium revision and DEPS patch text, or
      (None, None) if the depot does not recurse from chromium/v8.

    Raises:
      RuntimeError: DEPS file is missing, the chromium revision can't be
        determined, or the checkout/update of DEPS fails.
    """
    deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
    if not os.path.exists(deps_file_path):
      raise RuntimeError('DEPS file does not exists.[%s]' % deps_file_path)
    # Get current chromium revision (git hash).
    chromium_sha = CheckRunGit(['rev-parse', 'HEAD']).strip()
    if not chromium_sha:
      raise RuntimeError('Failed to determine Chromium revision for %s' %
                         revision)
    if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or
        'v8' in DEPOT_DEPS_NAME[depot]['from']):
      # Checkout DEPS file for the current chromium revision.
      if self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS,
                                                    chromium_sha,
                                                    cwd=self.src_cwd):
        if self.UpdateDeps(revision, depot, deps_file_path):
          # Prefix both sides with src/ so the patch applies from the
          # checkout root rather than inside src.
          diff_command = ['diff',
                          '--src-prefix=src/',
                          '--dst-prefix=src/',
                          '--no-ext-diff',
                          bisect_utils.FILE_DEPS]
          diff_text = CheckRunGit(diff_command, cwd=self.src_cwd)
          return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
        else:
          raise RuntimeError('Failed to update DEPS file for chromium: [%s]' %
                             chromium_sha)
      else:
        raise RuntimeError('DEPS checkout Failed for chromium revision : [%s]' %
                           chromium_sha)
    return (None, None)
  def BuildCurrentRevision(self, depot, revision=None):
    """Builds chrome and performance_ui_tests on the current revision.

    Args:
      depot: Current depot being bisected.
      revision: Optional revision; when set and the build is downloadable,
        an archived build is fetched instead of building locally.

    Returns:
      True if the build was successful.

    Raises:
      RuntimeError: A downloadable build could not be resolved or fetched.
    """
    if self.opts.debug_ignore_build:
      return True
    cwd = os.getcwd()
    os.chdir(self.src_cwd)
    # Fetch build archive for the given revision from the cloud storage when
    # the storage bucket is passed.
    if self.IsDownloadable(depot) and revision:
      deps_patch = None
      if depot != 'chromium':
        # Create a DEPS patch with new revision for dependency repository.
        (revision, deps_patch) = self.CreateDEPSPatch(depot, revision)
      # Get SVN revision for the given SHA, since builds are archived using SVN
      # revision.
      chromium_revision = self.source_control.SVNFindRev(revision)
      if not chromium_revision:
        raise RuntimeError(
            'Failed to determine SVN revision for %s' % revision)
      if self.DownloadCurrentBuild(chromium_revision, patch=deps_patch):
        os.chdir(cwd)
        if deps_patch:
          # Reverts the changes to DEPS file.
          self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS,
                                                     revision,
                                                     cwd=self.src_cwd)
        return True
      raise RuntimeError('Failed to download build archive for revision %s.\n'
                         'Unfortunately, bisection couldn\'t continue any '
                         'further. Please try running script without '
                         '--gs_bucket flag to produce local builds.' % revision)

    # Fall back to a local build.
    build_success = self.builder.Build(depot, self.opts)
    os.chdir(cwd)
    return build_success
def RunGClientHooks(self):
  """Runs gclient with runhooks command.

  Returns:
    True if gclient reports no errors.
  """
  if self.opts.debug_ignore_build:
    return True
  # RunGClient returns the process exit code; zero means success.
  exit_code = bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
  return not exit_code
def TryParseHistogramValuesFromOutput(self, metric, text):
  """Attempts to parse a metric in the format HISTOGRAM <graph>: <trace>.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  import ast

  metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])

  text_lines = text.split('\n')
  values_list = []

  for current_line in text_lines:
    if metric_formatted in current_line:
      current_line = current_line[len(metric_formatted):]

      try:
        # The histogram is emitted as a dict literal; ast.literal_eval parses
        # it without executing arbitrary expressions from the test output
        # (the previous eval() would).
        histogram_values = ast.literal_eval(current_line)

        for b in histogram_values['buckets']:
          average_for_bucket = float(b['high'] + b['low']) * 0.5
          # Extends the list with N-elements with the average for that bucket.
          values_list.extend([average_for_bucket] * b['count'])
      except (ValueError, SyntaxError, TypeError, KeyError):
        # Skip malformed histogram lines instead of aborting the parse.
        # (Was a bare `except` that also swallowed KeyboardInterrupt.)
        pass

  return values_list
def TryParseResultValuesFromOutput(self, metric, text):
  """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  # Format is: RESULT <graph>: <trace>= <value> <units>
  metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= <value>
  # (Raw strings avoid invalid escape sequences in the regex patterns.)
  single_result_re = re.compile(
      metric_re + r'\s*(?P<VALUE>[-]?\d*(\.\d*)?)')

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
  multi_results_re = re.compile(
      metric_re + r'\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')

  # The log will be parsed looking for format:
  # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
  mean_stddev_re = re.compile(
      metric_re +
      r'\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')

  text_lines = text.split('\n')
  values_list = []
  for current_line in text_lines:
    # Parse the output from the performance test for the metric we're
    # interested in.
    single_result_match = single_result_re.search(current_line)
    multi_results_match = multi_results_re.search(current_line)
    mean_stddev_match = mean_stddev_re.search(current_line)
    if (not single_result_match is None and
        single_result_match.group('VALUE')):
      values_list += [single_result_match.group('VALUE')]
    elif (not multi_results_match is None and
          multi_results_match.group('VALUES')):
      metric_values = multi_results_match.group('VALUES')
      values_list += metric_values.split(',')
    elif (not mean_stddev_match is None and
          mean_stddev_match.group('MEAN')):
      values_list += [mean_stddev_match.group('MEAN')]

  values_list = [float(v) for v in values_list if IsStringFloat(v)]

  # If the metric is times/t, we need to sum the timings in order to get
  # similar regression results as the try-bots.
  metrics_to_sum = [['times', 't'], ['times', 'page_load_time'],
      ['cold_times', 'page_load_time'], ['warm_times', 'page_load_time']]

  if metric in metrics_to_sum:
    if values_list:
      # Entries are already floats here, so sum() is equivalent to the old
      # reduce(lambda x, y: float(x) + float(y), ...) and works on Python 3.
      values_list = [sum(values_list)]

  return values_list
def ParseMetricValuesFromOutput(self, metric, text):
  """Parses performance test output and returns values for the given metric.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  parsed_values = self.TryParseResultValuesFromOutput(metric, text)
  if parsed_values:
    return parsed_values
  # No RESULT lines matched; fall back to histogram-formatted output.
  return self.TryParseHistogramValuesFromOutput(metric, text)
2065 def _GenerateProfileIfNecessary(self, command_args):
2066 """Checks the command line of the performance test for dependencies on
2067 profile generation, and runs tools/perf/generate_profile as necessary.
2069 Args:
2070 command_args: Command line being passed to performance test, as a list.
2072 Returns:
2073 False if profile generation was necessary and failed, otherwise True.
2076 if '--profile-dir' in ' '.join(command_args):
2077 # If we were using python 2.7+, we could just use the argparse
2078 # module's parse_known_args to grab --profile-dir. Since some of the
2079 # bots still run 2.6, have to grab the arguments manually.
2080 arg_dict = {}
2081 args_to_parse = ['--profile-dir', '--browser']
2083 for arg_to_parse in args_to_parse:
2084 for i, current_arg in enumerate(command_args):
2085 if arg_to_parse in current_arg:
2086 current_arg_split = current_arg.split('=')
2088 # Check 2 cases, --arg=<val> and --arg <val>
2089 if len(current_arg_split) == 2:
2090 arg_dict[arg_to_parse] = current_arg_split[1]
2091 elif i + 1 < len(command_args):
2092 arg_dict[arg_to_parse] = command_args[i+1]
2094 path_to_generate = os.path.join('tools', 'perf', 'generate_profile')
2096 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'):
2097 profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
2098 return not RunProcess(['python', path_to_generate,
2099 '--profile-type-to-generate', profile_type,
2100 '--browser', arg_dict['--browser'], '--output-dir', profile_path])
2101 return False
2102 return True
def _IsBisectModeUsingMetric(self):
  """Returns True when the bisect compares metric values (mean or std-dev)."""
  return self.opts.bisect_mode in (BISECT_MODE_MEAN, BISECT_MODE_STD_DEV)
def _IsBisectModeReturnCode(self):
  """Returns True when the bisect compares test return codes."""
  return self.opts.bisect_mode == BISECT_MODE_RETURN_CODE
def _IsBisectModeStandardDeviation(self):
  """Returns True when the bisect compares standard deviations."""
  return self.opts.bisect_mode == BISECT_MODE_STD_DEV
def RunPerformanceTestAndParseResults(
    self, command_to_run, metric, reset_on_first_run=False,
    upload_on_last_run=False, results_label=None):
  """Runs a performance test on the current revision and parses the results.

  Args:
    command_to_run: The command to be run to execute the performance test.
    metric: The metric to parse out from the results of the performance test.
        This is the result chart name and trace name, separated by slash.
    reset_on_first_run: If True, pass the flag --reset-results on first run.
    upload_on_last_run: If True, pass the flag --upload-results on last run.
    results_label: A value for the option flag --results-label.
    The arguments reset_on_first_run, upload_on_last_run and results_label
    are all ignored if the test is not a Telemetry test.

  Returns:
    (values dict, 0) if --debug_ignore_perf_test was passed.
    (values dict, 0, test output) if the test was run successfully.
    (error message, -1) if the test couldn't be run.
    (error message, -1, test output) if the test ran but there was an error.
  """
  success_code, failure_code = 0, -1

  # Debug mode: skip the run entirely and report a flat zero result.
  if self.opts.debug_ignore_perf_test:
    fake_results = {
        'mean': 0.0,
        'std_err': 0.0,
        'std_dev': 0.0,
        'values': [0.0]
    }
    return (fake_results, success_code)

  # For Windows platform set posix=False, to parse windows paths correctly.
  # On Windows, path separators '\' or '\\' are replace by '' when posix=True,
  # refer to http://bugs.python.org/issue1724822. By default posix=True.
  args = shlex.split(command_to_run, posix=not IsWindows())

  if not self._GenerateProfileIfNecessary(args):
    err_text = 'Failed to generate profile for performance test.'
    return (err_text, failure_code)

  # If running a Telemetry test for Chrome OS, insert the remote IP and
  # identity parameters.
  is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
  if self.opts.target_platform == 'cros' and is_telemetry:
    args.append('--remote=%s' % self.opts.cros_remote_ip)
    args.append('--identity=%s' % CROS_TEST_KEY_PATH)

  start_time = time.time()

  metric_values = []
  output_of_all_runs = ''
  # Repeat the test up to repeat_test_count times, but bail out early once
  # max_time_minutes of wall-clock time has elapsed.
  for i in xrange(self.opts.repeat_test_count):
    # Can ignore the return code since if the tests fail, it won't return 0.
    current_args = copy.copy(args)
    if is_telemetry:
      if i == 0 and reset_on_first_run:
        current_args.append('--reset-results')
      elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
        current_args.append('--upload-results')
      if results_label:
        current_args.append('--results-label=%s' % results_label)
    try:
      (output, return_code) = RunProcessAndRetrieveOutput(current_args,
                                                          cwd=self.src_cwd)
    except OSError, e:
      # ENOENT typically means the test binary path was wrong; anything else
      # is re-raised.
      if e.errno == errno.ENOENT:
        err_text  = ('Something went wrong running the performance test. '
                     'Please review the command line:\n\n')
        if 'src/' in ' '.join(args):
          err_text += ('Check that you haven\'t accidentally specified a '
                       'path with src/ in the command.\n\n')
        err_text += ' '.join(args)
        err_text += '\n'

        return (err_text, failure_code)
      raise

    output_of_all_runs += output
    if self.opts.output_buildbot_annotations:
      print output

    if self._IsBisectModeUsingMetric():
      metric_values += self.ParseMetricValuesFromOutput(metric, output)
      # If we're bisecting on a metric (ie, changes in the mean or
      # standard deviation) and no metric values are produced, bail out.
      if not metric_values:
        break
    elif self._IsBisectModeReturnCode():
      metric_values.append(return_code)

    elapsed_minutes = (time.time() - start_time) / 60.0
    if elapsed_minutes >= self.opts.max_time_minutes:
      break

  if len(metric_values) == 0:
    err_text = 'Metric %s was not found in the test output.' % metric
    # TODO(qyearsley): Consider also getting and displaying a list of metrics
    # that were found in the output here.
    return (err_text, failure_code, output_of_all_runs)

  # If we're bisecting on return codes, we're really just looking for zero vs
  # non-zero.
  if self._IsBisectModeReturnCode():
    # If any of the return codes is non-zero, output 1.
    overall_return_code = 0 if (
        all(current_value == 0 for current_value in metric_values)) else 1

    values = {
        'mean': overall_return_code,
        'std_err': 0.0,
        'std_dev': 0.0,
        'values': metric_values,
    }

    print 'Results of performance test: Command returned with %d' % (
        overall_return_code)
    print
  else:
    # Need to get the average value if there were multiple values.
    truncated_mean = CalculateTruncatedMean(metric_values,
                                            self.opts.truncate_percent)
    standard_err = CalculateStandardError(metric_values)
    standard_dev = CalculateStandardDeviation(metric_values)

    # In std-dev mode the spread itself is the value being bisected on.
    if self._IsBisectModeStandardDeviation():
      metric_values = [standard_dev]

    values = {
        'mean': truncated_mean,
        'std_err': standard_err,
        'std_dev': standard_dev,
        'values': metric_values,
    }

    print 'Results of performance test: %12f %12f' % (
        truncated_mean, standard_err)
    print
  return (values, success_code, output_of_all_runs)
def FindAllRevisionsToSync(self, revision, depot):
  """Finds all dependant revisions and depots that need to be synced for a
  given revision. This is only useful in the git workflow, as an svn depot
  may be split into multiple mirrors.

  ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
  skia/include. To sync skia/src properly, one has to find the proper
  revisions in skia/gyp and skia/include.

  Args:
    revision: The revision to sync to.
    depot: The depot in use at the moment (probably skia).

  Returns:
    A list of [depot, revision] pairs that need to be synced.
  """
  revisions_to_sync = [[depot, revision]]

  is_base = depot in ('chromium', 'cros', 'android-chrome')

  # Some SVN depots were split into multiple git depots, so we need to
  # figure out for each mirror which git revision to grab. There's no
  # guarantee that the SVN revision will exist for each of the dependant
  # depots, so we have to grep the git logs and grab the next earlier one.
  if (not is_base and DEPOT_DEPS_NAME[depot]['depends'] and
      self.source_control.IsGit()):
    svn_rev = self.source_control.SVNFindRev(revision)

    for dependant_depot in DEPOT_DEPS_NAME[depot]['depends']:
      self.ChangeToDepotWorkingDirectory(dependant_depot)

      dependant_rev = self.source_control.ResolveToRevision(
          svn_rev, dependant_depot, -1000)

      if dependant_rev:
        revisions_to_sync.append([dependant_depot, dependant_rev])

    num_resolved = len(revisions_to_sync)
    num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])

    self.ChangeToDepotWorkingDirectory(depot)

    # Every dependant depot must have resolved to a revision, otherwise the
    # sync would be inconsistent.
    if num_resolved - 1 != num_needed:
      return None

  return revisions_to_sync
2301 def PerformPreBuildCleanup(self):
2302 """Performs necessary cleanup between runs."""
2303 print 'Cleaning up between runs.'
2304 print
2306 # Having these pyc files around between runs can confuse the
2307 # perf tests and cause them to crash.
2308 for (path, _, files) in os.walk(self.src_cwd):
2309 for cur_file in files:
2310 if cur_file.endswith('.pyc'):
2311 path_to_file = os.path.join(path, cur_file)
2312 os.remove(path_to_file)
def PerformWebkitDirectoryCleanup(self, revision):
  """If the script is switching between Blink and WebKit during bisect,
  its faster to just delete the directory rather than leave it up to git
  to sync.

  Args:
    revision: The revision whose .DEPS.git is inspected to decide whether it
        uses Blink or WebKit.

  Returns:
    True if successful.
  """
  # Temporarily check out the .DEPS.git file at |revision| so it can be
  # inspected; it is reverted below once we know the answer.
  if not self.source_control.CheckoutFileAtRevision(
      bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
    return False

  cwd = os.getcwd()
  os.chdir(self.src_cwd)

  is_blink = bisect_utils.IsDepsFileBlink()

  os.chdir(cwd)

  if not self.source_control.RevertFileToHead(
      bisect_utils.FILE_DEPS_GIT):
    return False

  # Only when this revision flips between Blink and WebKit does the
  # third_party/Webkit directory need to be removed and re-synced.
  if self.was_blink != is_blink:
    self.was_blink = is_blink
    # Removes third_party/Webkit directory.
    return bisect_utils.RemoveThirdPartyDirectory('Webkit')
  return True
def PerformCrosChrootCleanup(self):
  """Deletes the chroot.

  Returns:
    True if successful.
  """
  original_cwd = os.getcwd()
  self.ChangeToDepotWorkingDirectory('cros')
  delete_cmd = [CROS_SDK_PATH, '--delete']
  exit_code = RunProcess(delete_cmd)
  os.chdir(original_cwd)
  # RunProcess returns the process exit code; zero means success.
  return not exit_code
def CreateCrosChroot(self):
  """Creates a new chroot.

  Returns:
    True if successful.
  """
  original_cwd = os.getcwd()
  self.ChangeToDepotWorkingDirectory('cros')
  create_cmd = [CROS_SDK_PATH, '--create']
  exit_code = RunProcess(create_cmd)
  os.chdir(original_cwd)
  # RunProcess returns the process exit code; zero means success.
  return not exit_code
def PerformPreSyncCleanup(self, revision, depot):
  """Performs any necessary cleanup before syncing.

  Args:
    revision: Revision that is about to be synced.
    depot: The depot being synced.

  Returns:
    True if successful.
  """
  if depot == 'cros':
    return self.PerformCrosChrootCleanup()
  if depot != 'chromium':
    # Other depots need no pre-sync cleanup.
    return True
  # Removes third_party/libjingle. At some point, libjingle was causing
  # issues syncing when using the git workflow (crbug.com/266324).
  if not bisect_utils.RemoveThirdPartyDirectory('libjingle'):
    return False
  # Removes third_party/skia. At some point, skia was causing
  # issues syncing when using the git workflow (crbug.com/377951).
  if not bisect_utils.RemoveThirdPartyDirectory('skia'):
    return False
  return self.PerformWebkitDirectoryCleanup(revision)
def RunPostSync(self, depot):
  """Performs any work after syncing.

  Args:
    depot: The depot that was just synced.

  Returns:
    True if successful.
  """
  if self.opts.target_platform == 'android':
    if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
        path_to_src=self.src_cwd):
      return False

  if depot == 'cros':
    return self.CreateCrosChroot()
  # Removed an unreachable trailing `return True`: both branches of the
  # previous if/else already returned.
  return self.RunGClientHooks()
def ShouldSkipRevision(self, depot, revision):
  """Some commits can be safely skipped (such as a DEPS roll), since the tool
  is git based those changes would have no effect.

  Args:
    depot: The depot being bisected.
    revision: Current revision we're synced to.

  Returns:
    True if we should skip building/testing this revision.
  """
  if depot != 'chromium':
    return False
  if not self.source_control.IsGit():
    return False
  # List the files touched by the commit; a commit that only modifies DEPS
  # cannot affect a git-based checkout.
  cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
  changed_files = CheckRunGit(cmd).splitlines()
  return changed_files == ['DEPS']
def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
                            skippable=False):
  """Performs a full sync/build/run of the specified revision.

  Args:
    revision: The revision to sync to.
    depot: The depot that's being used at the moment (src, webkit, etc.)
    command_to_run: The command to execute the performance test.
    metric: The performance metric being tested.
    skippable: If True, revisions that only change DEPS may be skipped
        without building/testing.

  Returns:
    On success, a tuple containing the results of the performance test.
    Otherwise, a tuple with the error message.
  """
  # Pick the sync tool that matches this depot; plain git depots use neither.
  sync_client = None
  if depot == 'chromium' or depot == 'android-chrome':
    sync_client = 'gclient'
  elif depot == 'cros':
    sync_client = 'repo'

  revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)

  if not revisions_to_sync:
    return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)

  if not self.PerformPreSyncCleanup(revision, depot):
    return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)

  success = True

  if not self.opts.debug_ignore_sync:
    # Sync this depot plus every dependant depot mirror to matching revisions.
    for r in revisions_to_sync:
      self.ChangeToDepotWorkingDirectory(r[0])

      if sync_client:
        self.PerformPreBuildCleanup()

      # If you're using gclient to sync, you need to specify the depot you
      # want so that all the dependencies sync properly as well.
      # ie. gclient sync src@<SHA1>
      current_revision = r[1]
      if sync_client == 'gclient':
        current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
                                      current_revision)
      if not self.source_control.SyncToRevision(current_revision,
                                                sync_client):
        success = False

        break

  if success:
    success = self.RunPostSync(depot)
    if success:
      if skippable and self.ShouldSkipRevision(depot, revision):
        return ('Skipped revision: [%s]' % str(revision),
                BUILD_RESULT_SKIPPED)

      # Timing is recorded so build time and perf-test time can be reported
      # separately in the result tuple below.
      start_build_time = time.time()
      if self.BuildCurrentRevision(depot, revision):
        after_build_time = time.time()
        results = self.RunPerformanceTestAndParseResults(command_to_run,
                                                         metric)
        # Restore build output directory once the tests are done, to avoid
        # any descrepancy.
        if self.IsDownloadable(depot) and revision:
          self.BackupOrRestoreOutputdirectory(restore=True)

        if results[1] == 0:
          external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision(
              depot, revision)

          if not external_revisions is None:
            return (results[0], results[1], external_revisions,
                    time.time() - after_build_time, after_build_time -
                    start_build_time)
          else:
            return ('Failed to parse DEPS file for external revisions.',
                    BUILD_RESULT_FAIL)
        else:
          return results
      else:
        return ('Failed to build revision: [%s]' % (str(revision, )),
                BUILD_RESULT_FAIL)
    else:
      return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
  else:
    return ('Failed to sync revision: [%s]' % (str(revision, )),
            BUILD_RESULT_FAIL)
def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
  """Given known good and bad values, decide if the current_value passed
  or failed.

  Args:
    current_value: The value of the metric being checked.
    known_bad_value: The reference value for a "failed" run.
    known_good_value: The reference value for a "passed" run.

  Returns:
    True if the current_value is closer to the known_good_value than the
    known_bad_value.
  """
  # Standard-deviation mode compares spreads; every other mode compares means.
  if self.opts.bisect_mode == BISECT_MODE_STD_DEV:
    compare_key = 'std_dev'
  else:
    compare_key = 'mean'

  distance_to_good = abs(current_value[compare_key] -
                         known_good_value[compare_key])
  distance_to_bad = abs(current_value[compare_key] -
                        known_bad_value[compare_key])
  return distance_to_good < distance_to_bad
2542 def _GetDepotDirectory(self, depot_name):
2543 if depot_name == 'chromium':
2544 return self.src_cwd
2545 elif depot_name == 'cros':
2546 return self.cros_cwd
2547 elif depot_name in DEPOT_NAMES:
2548 return self.depot_cwd[depot_name]
2549 else:
2550 assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
2551 ' was added without proper support?' % depot_name
def ChangeToDepotWorkingDirectory(self, depot_name):
  """Given a depot, changes to the appropriate working directory.

  Args:
    depot_name: The name of the depot (see DEPOT_NAMES).
  """
  target_directory = self._GetDepotDirectory(depot_name)
  os.chdir(target_directory)
2561 def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
2562 r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'],
2563 search_forward=True)
2564 r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'],
2565 search_forward=False)
2566 min_revision_data['external']['v8_bleeding_edge'] = r1
2567 max_revision_data['external']['v8_bleeding_edge'] = r2
2569 if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2570 min_revision_data['revision']) or
2571 not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2572 max_revision_data['revision'])):
2573 self.warnings.append('Trunk revisions in V8 did not map directly to '
2574 'bleeding_edge. Attempted to expand the range to find V8 rolls which '
2575 'did map directly to bleeding_edge revisions, but results might not '
2576 'be valid.')
def _FindNextDepotToBisect(self, current_depot, current_revision,
    min_revision_data, max_revision_data):
  """Given the state of the bisect, decides which depot the script should
  dive into next (if any).

  Args:
    current_depot: Current depot being bisected.
    current_revision: Current revision synced to.
    min_revision_data: Data about the earliest revision in the bisect range.
    max_revision_data: Data about the latest revision in the bisect range.

  Returns:
    The depot to bisect next, or None.
  """
  external_depot = None
  for next_depot in DEPOT_NAMES:
    # dict.has_key() is deprecated (removed in Python 3); use `in`.
    if 'platform' in DEPOT_DEPS_NAME[next_depot]:
      if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
        continue

    if not (DEPOT_DEPS_NAME[next_depot]['recurse'] and
        min_revision_data['depot'] in DEPOT_DEPS_NAME[next_depot]['from']):
      continue

    if current_depot == 'v8':
      # We grab the bleeding_edge info here rather than earlier because we
      # finally have the revision range. From that we can search forwards and
      # backwards to try to match trunk revisions to bleeding_edge.
      self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)

    # The depot only needs bisecting when its revision actually changed
    # across the range.
    if (min_revision_data['external'].get(next_depot) ==
        max_revision_data['external'].get(next_depot)):
      continue

    if (min_revision_data['external'].get(next_depot) and
        max_revision_data['external'].get(next_depot)):
      external_depot = next_depot
      break

  return external_depot
def PrepareToBisectOnDepot(self,
                           current_depot,
                           end_revision,
                           start_revision,
                           previous_depot,
                           previous_revision):
  """Changes to the appropriate directory and gathers a list of revisions
  to bisect between |start_revision| and |end_revision|.

  Args:
    current_depot: The depot we want to bisect.
    end_revision: End of the revision range.
    start_revision: Start of the revision range.
    previous_depot: The depot we were previously bisecting.
    previous_revision: The last revision we synced to on |previous_depot|.

  Returns:
    A list containing the revisions between |start_revision| and
    |end_revision| inclusive.
  """
  # Change into working directory of external library to run
  # subsequent commands.
  self.ChangeToDepotWorkingDirectory(current_depot)

  # V8 (and possibly others) is merged in periodically. Bisecting
  # this directory directly won't give much good info.
  # dict.has_key() is deprecated (removed in Python 3); use `in`.
  if 'custom_deps' in DEPOT_DEPS_NAME[current_depot]:
    config_path = os.path.join(self.src_cwd, '..')
    if bisect_utils.RunGClientAndCreateConfig(self.opts,
        DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
      return []
    if bisect_utils.RunGClient(
        ['sync', '--revision', previous_revision], cwd=self.src_cwd):
      return []

  if current_depot == 'v8_bleeding_edge':
    # Swap the checked-out v8 directory for bleeding_edge and register
    # cleanup commands that restore the original layout afterwards.
    self.ChangeToDepotWorkingDirectory('chromium')

    shutil.move('v8', 'v8.bak')
    shutil.move('v8_bleeding_edge', 'v8')

    self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
    self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])

    self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
    self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')

    self.ChangeToDepotWorkingDirectory(current_depot)

  depot_revision_list = self.GetRevisionList(current_depot,
                                             end_revision,
                                             start_revision)

  self.ChangeToDepotWorkingDirectory('chromium')

  return depot_revision_list
def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
  """Gathers reference values by running the performance tests on the
  known good and bad revisions.

  Args:
    good_rev: The last known good revision where the performance regression
      has not occurred yet.
    bad_rev: A revision where the performance regression has already occurred.
    cmd: The command to execute the performance test.
    metric: The metric being tested for regression.
    target_depot: The depot the revisions belong to.

  Returns:
    A tuple with the results of building and running each revision.
  """
  bad_run_results = self.SyncBuildAndRunRevision(
      bad_rev, target_depot, cmd, metric)

  good_run_results = None

  # Only run the good revision when the bad one succeeded (element 1 of the
  # result tuple is the error code, 0 on success).
  if not bad_run_results[1]:
    good_run_results = self.SyncBuildAndRunRevision(
        good_rev, target_depot, cmd, metric)

  return (bad_run_results, good_run_results)
def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
  """Adds new revisions to the revision_data dict and initializes them.

  Args:
    revisions: List of revisions to add.
    depot: Depot that's currently in use (src, webkit, etc...)
    sort: Sorting key for displaying revisions.
    revision_data: A dict to add the new revisions into. Existing revisions
        will have their sort keys offset.
  """
  num_depot_revisions = len(revisions)

  # Shift existing entries that sort after the insertion point to make room
  # for the new revisions. (iteritems() was Python-2-only; items() behaves
  # identically here since values are mutated in place, not reassigned.)
  for _, revision_info in revision_data.items():
    if revision_info['sort'] > sort:
      revision_info['sort'] += num_depot_revisions

  # enumerate() replaces the Python-2-only xrange() index loop.
  for i, revision in enumerate(revisions):
    revision_data[revision] = {'revision' : revision,
                               'depot' : depot,
                               'value' : None,
                               'perf_time' : 0,
                               'build_time' : 0,
                               'passed' : '?',
                               'sort' : i + sort + 1}
2733 def PrintRevisionsToBisectMessage(self, revision_list, depot):
2734 if self.opts.output_buildbot_annotations:
2735 step_name = 'Bisection Range: [%s - %s]' % (
2736 revision_list[len(revision_list)-1], revision_list[0])
2737 bisect_utils.OutputAnnotationStepStart(step_name)
2739 print
2740 print 'Revisions to bisect on [%s]:' % depot
2741 for revision_id in revision_list:
2742 print ' -> %s' % (revision_id, )
2743 print
2745 if self.opts.output_buildbot_annotations:
2746 bisect_utils.OutputAnnotationStepClosed()
def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
  """Checks to see if changes to DEPS file occurred, and that the revision
  range also includes the change to .DEPS.git. If it doesn't, attempts to
  expand the revision range to include it.

  Args:
    bad_revision: First known bad revision.
    good_revision: Last known good revision.

  Returns:
    A tuple with the new bad and good revisions.
  """
  # Only applicable to git-based chromium bisects; other platforms/VCS are
  # returned unchanged.
  if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
    changes_to_deps = self.source_control.QueryFileRevisionHistory(
        'DEPS', good_revision, bad_revision)

    if changes_to_deps:
      # DEPS file was changed, search from the oldest change to DEPS file to
      # bad_revision to see if there are matching .DEPS.git changes.
      oldest_deps_change = changes_to_deps[-1]
      changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
          bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)

      # Mismatched counts mean some DEPS change has no corresponding
      # .DEPS.git change inside the range yet.
      if len(changes_to_deps) != len(changes_to_gitdeps):
        # Grab the timestamp of the last DEPS change
        cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
        output = CheckRunGit(cmd)
        commit_time = int(output)

        # Try looking for a commit that touches the .DEPS.git file in the
        # next 15 minutes after the DEPS file change.
        cmd = ['log', '--format=%H', '-1',
            '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
            'origin/master', bisect_utils.FILE_DEPS_GIT]
        output = CheckRunGit(cmd)
        output = output.strip()
        if output:
          self.warnings.append('Detected change to DEPS and modified '
              'revision range to include change to .DEPS.git')
          # Expand the bad end of the range to that .DEPS.git commit.
          return (output, good_revision)
        else:
          self.warnings.append('Detected change to DEPS but couldn\'t find '
              'matching change to .DEPS.git')
  return (bad_revision, good_revision)
def CheckIfRevisionsInProperOrder(self,
                                  target_depot,
                                  good_revision,
                                  bad_revision):
  """Checks that |good_revision| is an earlier revision than |bad_revision|.

  Args:
    target_depot: The depot the revisions belong to.
    good_revision: Number/tag of the known good revision.
    bad_revision: Number/tag of the known bad revision.

  Returns:
    True if the revisions are in the proper order (good earlier than bad).
  """
  if not (self.source_control.IsGit() and target_depot != 'cros'):
    # Cros/svn use integers
    return int(good_revision) <= int(bad_revision)

  # For git, compare the commit timestamps of the two revisions.
  depot_dir = self._GetDepotDirectory(target_depot)
  good_commit_time = int(CheckRunGit(
      ['log', '--format=%ct', '-1', good_revision], cwd=depot_dir))
  bad_commit_time = int(CheckRunGit(
      ['log', '--format=%ct', '-1', bad_revision], cwd=depot_dir))

  return good_commit_time <= bad_commit_time
2822 def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
2823 """Given known good and bad revisions, run a binary search on all
2824 intermediate revisions to determine the CL where the performance regression
2825 occurred.
2827 Args:
2828 command_to_run: Specify the command to execute the performance test.
2829 good_revision: Number/tag of the known good revision.
2830 bad_revision: Number/tag of the known bad revision.
2831 metric: The performance metric to monitor.
2833 Returns:
2834 A dict with 2 members, 'revision_data' and 'error'. On success,
2835 'revision_data' will contain a dict mapping revision ids to
2836 data about that revision. Each piece of revision data consists of a
2837 dict with the following keys:
2839 'passed': Represents whether the performance test was successful at
2840 that revision. Possible values include: 1 (passed), 0 (failed),
2841 '?' (skipped), 'F' (build failed).
2842 'depot': The depot that this revision is from (ie. WebKit)
2843 'external': If the revision is a 'src' revision, 'external' contains
2844 the revisions of each of the external libraries.
2845 'sort': A sort value for sorting the dict in order of commits.
2847 For example:
2849 'error':None,
2850 'revision_data':
2852 'CL #1':
2854 'passed':False,
2855 'depot':'chromium',
2856 'external':None,
2857 'sort':0
2862 If an error occurred, the 'error' field will contain the message and
2863 'revision_data' will be empty.
2865 results = {'revision_data' : {},
2866 'error' : None}
2868 # Choose depot to bisect first
2869 target_depot = 'chromium'
2870 if self.opts.target_platform == 'cros':
2871 target_depot = 'cros'
2872 elif self.opts.target_platform == 'android-chrome':
2873 target_depot = 'android-chrome'
2875 cwd = os.getcwd()
2876 self.ChangeToDepotWorkingDirectory(target_depot)
2878 # If they passed SVN CL's, etc... we can try match them to git SHA1's.
2879 bad_revision = self.source_control.ResolveToRevision(bad_revision_in,
2880 target_depot, 100)
2881 good_revision = self.source_control.ResolveToRevision(good_revision_in,
2882 target_depot, -100)
2884 os.chdir(cwd)
2887 if bad_revision is None:
2888 results['error'] = 'Could\'t resolve [%s] to SHA1.' % (bad_revision_in,)
2889 return results
2891 if good_revision is None:
2892 results['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in,)
2893 return results
2895 # Check that they didn't accidentally swap good and bad revisions.
2896 if not self.CheckIfRevisionsInProperOrder(
2897 target_depot, good_revision, bad_revision):
2898 results['error'] = 'bad_revision < good_revision, did you swap these '\
2899 'by mistake?'
2900 return results
2902 (bad_revision, good_revision) = self.NudgeRevisionsIfDEPSChange(
2903 bad_revision, good_revision)
2905 if self.opts.output_buildbot_annotations:
2906 bisect_utils.OutputAnnotationStepStart('Gathering Revisions')
2908 print 'Gathering revision range for bisection.'
2909 # Retrieve a list of revisions to do bisection on.
2910 src_revision_list = self.GetRevisionList(target_depot,
2911 bad_revision,
2912 good_revision)
2914 if self.opts.output_buildbot_annotations:
2915 bisect_utils.OutputAnnotationStepClosed()
2917 if src_revision_list:
2918 # revision_data will store information about a revision such as the
2919 # depot it came from, the webkit/V8 revision at that time,
2920 # performance timing, build state, etc...
2921 revision_data = results['revision_data']
2923 # revision_list is the list we're binary searching through at the moment.
2924 revision_list = []
2926 sort_key_ids = 0
2928 for current_revision_id in src_revision_list:
2929 sort_key_ids += 1
2931 revision_data[current_revision_id] = {'value' : None,
2932 'passed' : '?',
2933 'depot' : target_depot,
2934 'external' : None,
2935 'perf_time' : 0,
2936 'build_time' : 0,
2937 'sort' : sort_key_ids}
2938 revision_list.append(current_revision_id)
2940 min_revision = 0
2941 max_revision = len(revision_list) - 1
2943 self.PrintRevisionsToBisectMessage(revision_list, target_depot)
2945 if self.opts.output_buildbot_annotations:
2946 bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')
2948 print 'Gathering reference values for bisection.'
2950 # Perform the performance tests on the good and bad revisions, to get
2951 # reference values.
2952 (bad_results, good_results) = self.GatherReferenceValues(good_revision,
2953 bad_revision,
2954 command_to_run,
2955 metric,
2956 target_depot)
2958 if self.opts.output_buildbot_annotations:
2959 bisect_utils.OutputAnnotationStepClosed()
2961 if bad_results[1]:
2962 results['error'] = ('An error occurred while building and running '
2963 'the \'bad\' reference value. The bisect cannot continue without '
2964 'a working \'bad\' revision to start from.\n\nError: %s' %
2965 bad_results[0])
2966 return results
2968 if good_results[1]:
2969 results['error'] = ('An error occurred while building and running '
2970 'the \'good\' reference value. The bisect cannot continue without '
2971 'a working \'good\' revision to start from.\n\nError: %s' %
2972 good_results[0])
2973 return results
2976 # We need these reference values to determine if later runs should be
2977 # classified as pass or fail.
2978 known_bad_value = bad_results[0]
2979 known_good_value = good_results[0]
2981 # Can just mark the good and bad revisions explicitly here since we
2982 # already know the results.
2983 bad_revision_data = revision_data[revision_list[0]]
2984 bad_revision_data['external'] = bad_results[2]
2985 bad_revision_data['perf_time'] = bad_results[3]
2986 bad_revision_data['build_time'] = bad_results[4]
2987 bad_revision_data['passed'] = False
2988 bad_revision_data['value'] = known_bad_value
2990 good_revision_data = revision_data[revision_list[max_revision]]
2991 good_revision_data['external'] = good_results[2]
2992 good_revision_data['perf_time'] = good_results[3]
2993 good_revision_data['build_time'] = good_results[4]
2994 good_revision_data['passed'] = True
2995 good_revision_data['value'] = known_good_value
2997 next_revision_depot = target_depot
2999 while True:
3000 if not revision_list:
3001 break
3003 min_revision_data = revision_data[revision_list[min_revision]]
3004 max_revision_data = revision_data[revision_list[max_revision]]
3006 if max_revision - min_revision <= 1:
3007 current_depot = min_revision_data['depot']
3008 if min_revision_data['passed'] == '?':
3009 next_revision_index = min_revision
3010 elif max_revision_data['passed'] == '?':
3011 next_revision_index = max_revision
3012 elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
3013 previous_revision = revision_list[min_revision]
3014 # If there were changes to any of the external libraries we track,
3015 # should bisect the changes there as well.
3016 external_depot = self._FindNextDepotToBisect(current_depot,
3017 previous_revision, min_revision_data, max_revision_data)
3019 # If there was no change in any of the external depots, the search
3020 # is over.
3021 if not external_depot:
3022 if current_depot == 'v8':
3023 self.warnings.append('Unfortunately, V8 bisection couldn\'t '
3024 'continue any further. The script can only bisect into '
3025 'V8\'s bleeding_edge repository if both the current and '
3026 'previous revisions in trunk map directly to revisions in '
3027 'bleeding_edge.')
3028 break
3030 earliest_revision = max_revision_data['external'][external_depot]
3031 latest_revision = min_revision_data['external'][external_depot]
3033 new_revision_list = self.PrepareToBisectOnDepot(external_depot,
3034 latest_revision,
3035 earliest_revision,
3036 next_revision_depot,
3037 previous_revision)
3039 if not new_revision_list:
3040 results['error'] = 'An error occurred attempting to retrieve'\
3041 ' revision range: [%s..%s]' % \
3042 (earliest_revision, latest_revision)
3043 return results
3045 self.AddRevisionsIntoRevisionData(new_revision_list,
3046 external_depot,
3047 min_revision_data['sort'],
3048 revision_data)
3050 # Reset the bisection and perform it on the newly inserted
3051 # changelists.
3052 revision_list = new_revision_list
3053 min_revision = 0
3054 max_revision = len(revision_list) - 1
3055 sort_key_ids += len(revision_list)
3057 print 'Regression in metric:%s appears to be the result of changes'\
3058 ' in [%s].' % (metric, external_depot)
3060 self.PrintRevisionsToBisectMessage(revision_list, external_depot)
3062 continue
3063 else:
3064 break
3065 else:
3066 next_revision_index = int((max_revision - min_revision) / 2) +\
3067 min_revision
3069 next_revision_id = revision_list[next_revision_index]
3070 next_revision_data = revision_data[next_revision_id]
3071 next_revision_depot = next_revision_data['depot']
3073 self.ChangeToDepotWorkingDirectory(next_revision_depot)
3075 if self.opts.output_buildbot_annotations:
3076 step_name = 'Working on [%s]' % next_revision_id
3077 bisect_utils.OutputAnnotationStepStart(step_name)
3079 print 'Working on revision: [%s]' % next_revision_id
3081 run_results = self.SyncBuildAndRunRevision(next_revision_id,
3082 next_revision_depot,
3083 command_to_run,
3084 metric, skippable=True)
3086 # If the build is successful, check whether or not the metric
3087 # had regressed.
3088 if not run_results[1]:
3089 if len(run_results) > 2:
3090 next_revision_data['external'] = run_results[2]
3091 next_revision_data['perf_time'] = run_results[3]
3092 next_revision_data['build_time'] = run_results[4]
3094 passed_regression = self._CheckIfRunPassed(run_results[0],
3095 known_good_value,
3096 known_bad_value)
3098 next_revision_data['passed'] = passed_regression
3099 next_revision_data['value'] = run_results[0]
3101 if passed_regression:
3102 max_revision = next_revision_index
3103 else:
3104 min_revision = next_revision_index
3105 else:
3106 if run_results[1] == BUILD_RESULT_SKIPPED:
3107 next_revision_data['passed'] = 'Skipped'
3108 elif run_results[1] == BUILD_RESULT_FAIL:
3109 next_revision_data['passed'] = 'Build Failed'
3111 print run_results[0]
3113 # If the build is broken, remove it and redo search.
3114 revision_list.pop(next_revision_index)
3116 max_revision -= 1
3118 if self.opts.output_buildbot_annotations:
3119 self._PrintPartialResults(results)
3120 bisect_utils.OutputAnnotationStepClosed()
3121 else:
3122 # Weren't able to sync and retrieve the revision range.
3123 results['error'] = 'An error occurred attempting to retrieve revision '\
3124 'range: [%s..%s]' % (good_revision, bad_revision)
3126 return results
3128 def _PrintPartialResults(self, results_dict):
3129 revision_data = results_dict['revision_data']
3130 revision_data_sorted = sorted(revision_data.iteritems(),
3131 key = lambda x: x[1]['sort'])
3132 results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
3134 self._PrintTestedCommitsTable(revision_data_sorted,
3135 results_dict['first_working_revision'],
3136 results_dict['last_broken_revision'],
3137 100, final_step=False)
3139 def _PrintConfidence(self, results_dict):
3140 # The perf dashboard specifically looks for the string
3141 # "Confidence in Bisection Results: 100%" to decide whether or not
3142 # to cc the author(s). If you change this, please update the perf
3143 # dashboard as well.
3144 print 'Confidence in Bisection Results: %d%%' % results_dict['confidence']
3146 def _PrintBanner(self, results_dict):
3147 print
3148 print " __o_\___ Aw Snap! We hit a speed bump!"
3149 print "=-O----O-'__.~.___________________________________"
3150 print
3151 if self._IsBisectModeReturnCode():
3152 print ('Bisect reproduced a change in return codes while running the '
3153 'performance test.')
3154 else:
3155 print ('Bisect reproduced a %.02f%% (+-%.02f%%) change in the '
3156 '%s metric.' % (results_dict['regression_size'],
3157 results_dict['regression_std_err'], '/'.join(self.opts.metric)))
3158 self._PrintConfidence(results_dict)
3160 def _PrintFailedBanner(self, results_dict):
3161 print
3162 if self._IsBisectModeReturnCode():
3163 print 'Bisect could not reproduce a change in the return code.'
3164 else:
3165 print ('Bisect could not reproduce a change in the '
3166 '%s metric.' % '/'.join(self.opts.metric))
3167 print
3169 def _GetViewVCLinkFromDepotAndHash(self, cl, depot):
3170 info = self.source_control.QueryRevisionInfo(cl,
3171 self._GetDepotDirectory(depot))
3172 if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'):
3173 try:
3174 # Format is "git-svn-id: svn://....@123456 <other data>"
3175 svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i]
3176 svn_revision = svn_line[0].split('@')
3177 svn_revision = svn_revision[1].split(' ')[0]
3178 return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision
3179 except IndexError:
3180 return ''
3181 return ''
  def _PrintRevisionInfo(self, cl, info, depot=None):
    """Prints subject/author/link/date details for one culprit CL.

    Args:
      cl: The commit hash of the CL.
      info: Dict of revision info (subject, author, email, body, date).
      depot: The depot the CL belongs to; used to build a ViewVC link.
    """
    # The perf dashboard specifically looks for the string
    # "Author : " to parse out who to cc on a bug. If you change the
    # formatting here, please update the perf dashboard as well.
    print
    print 'Subject : %s' % info['subject']
    print 'Author : %s' % info['author']
    # Only show the email separately when it isn't just "<author>@...".
    if not info['email'].startswith(info['author']):
      print 'Email : %s' % info['email']
    commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
    if commit_link:
      print 'Link : %s' % commit_link
    else:
      # No svn revision could be parsed; dump the raw body for debugging.
      print
      print 'Failed to parse svn revision from body:'
      print
      print info['body']
      print
    print 'Commit : %s' % cl
    print 'Date : %s' % info['date']
3204 def _PrintTableRow(self, column_widths, row_data):
3205 assert len(column_widths) == len(row_data)
3207 text = ''
3208 for i in xrange(len(column_widths)):
3209 current_row_data = row_data[i].center(column_widths[i], ' ')
3210 text += ('%%%ds' % column_widths[i]) % current_row_data
3211 print text
3213 def _PrintTestedCommitsHeader(self):
3214 if self.opts.bisect_mode == BISECT_MODE_MEAN:
3215 self._PrintTableRow(
3216 [20, 70, 14, 12, 13],
3217 ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State'])
3218 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
3219 self._PrintTableRow(
3220 [20, 70, 14, 12, 13],
3221 ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State'])
3222 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
3223 self._PrintTableRow(
3224 [20, 70, 14, 13],
3225 ['Depot', 'Commit SHA', 'Return Code', 'State'])
3226 else:
3227 assert False, "Invalid bisect_mode specified."
3228 print ' %20s %70s %14s %13s' % ('Depot'.center(20, ' '),
3229 'Commit SHA'.center(70, ' '), 'Return Code'.center(14, ' '),
3230 'State'.center(13, ' '))
3232 def _PrintTestedCommitsEntry(self, current_data, cl_link, state_str):
3233 if self.opts.bisect_mode == BISECT_MODE_MEAN:
3234 std_error = '+-%.02f' % current_data['value']['std_err']
3235 mean = '%.02f' % current_data['value']['mean']
3236 self._PrintTableRow(
3237 [20, 70, 12, 14, 13],
3238 [current_data['depot'], cl_link, mean, std_error, state_str])
3239 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
3240 std_error = '+-%.02f' % current_data['value']['std_err']
3241 mean = '%.02f' % current_data['value']['mean']
3242 self._PrintTableRow(
3243 [20, 70, 12, 14, 13],
3244 [current_data['depot'], cl_link, std_error, mean, state_str])
3245 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
3246 mean = '%d' % current_data['value']['mean']
3247 self._PrintTableRow(
3248 [20, 70, 14, 13],
3249 [current_data['depot'], cl_link, mean, state_str])
  def _PrintTestedCommitsTable(self, revision_data_sorted,
      first_working_revision, last_broken_revision, confidence,
      final_step=True):
    """Prints a table of every revision that produced a measurement.

    Args:
      revision_data_sorted: List of (revision id, data) tuples in commit order.
      first_working_revision: Id of the earliest passing revision.
      last_broken_revision: Id of the latest failing revision.
      confidence: Bisect confidence; a falsy value suppresses good/bad labels.
      final_step: Whether this is the final table (vs. partial progress).
    """
    print
    if final_step:
      print 'Tested commits:'
    else:
      print 'Partial results:'
    self._PrintTestedCommitsHeader()
    # state tracks position relative to the suspected breakage point:
    # 0 = still in the broken range, 1 = at the suspect boundary,
    # 2 = into the known-good range.
    state = 0
    for current_id, current_data in revision_data_sorted:
      if current_data['value']:
        if (current_id == last_broken_revision or
            current_id == first_working_revision):
          # If confidence is too low, don't add this empty line since it's
          # used to put focus on a suspected CL.
          if confidence and final_step:
            print
          state += 1
          if state == 2 and not final_step:
            # Just want a separation between "bad" and "good" cl's.
            print

        state_str = 'Bad'
        if state == 1 and final_step:
          state_str = 'Suspected CL'
        elif state == 2:
          state_str = 'Good'

        # If confidence is too low, don't bother outputting good/bad.
        if not confidence:
          state_str = ''
        state_str = state_str.center(13, ' ')

        cl_link = self._GetViewVCLinkFromDepotAndHash(current_id,
            current_data['depot'])
        if not cl_link:
          cl_link = current_id
        self._PrintTestedCommitsEntry(current_data, cl_link, state_str)
3291 def _PrintReproSteps(self):
3292 print
3293 print 'To reproduce locally:'
3294 print '$ ' + self.opts.command
3295 if bisect_utils.IsTelemetryCommand(self.opts.command):
3296 print
3297 print 'Also consider passing --profiler=list to see available profilers.'
3299 def _PrintOtherRegressions(self, other_regressions, revision_data):
3300 print
3301 print 'Other regressions may have occurred:'
3302 print ' %8s %70s %10s' % ('Depot'.center(8, ' '),
3303 'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
3304 for regression in other_regressions:
3305 current_id, previous_id, confidence = regression
3306 current_data = revision_data[current_id]
3307 previous_data = revision_data[previous_id]
3309 current_link = self._GetViewVCLinkFromDepotAndHash(current_id,
3310 current_data['depot'])
3311 previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id,
3312 previous_data['depot'])
3314 # If we can't map it to a viewable URL, at least show the original hash.
3315 if not current_link:
3316 current_link = current_id
3317 if not previous_link:
3318 previous_link = previous_id
3320 print ' %8s %70s %s' % (
3321 current_data['depot'], current_link,
3322 ('%d%%' % confidence).center(10, ' '))
3323 print ' %8s %70s' % (
3324 previous_data['depot'], previous_link)
3325 print
3327 def _PrintStepTime(self, revision_data_sorted):
3328 step_perf_time_avg = 0.0
3329 step_build_time_avg = 0.0
3330 step_count = 0.0
3331 for _, current_data in revision_data_sorted:
3332 if current_data['value']:
3333 step_perf_time_avg += current_data['perf_time']
3334 step_build_time_avg += current_data['build_time']
3335 step_count += 1
3336 if step_count:
3337 step_perf_time_avg = step_perf_time_avg / step_count
3338 step_build_time_avg = step_build_time_avg / step_count
3339 print
3340 print 'Average build time : %s' % datetime.timedelta(
3341 seconds=int(step_build_time_avg))
3342 print 'Average test time : %s' % datetime.timedelta(
3343 seconds=int(step_perf_time_avg))
3345 def _PrintWarnings(self):
3346 if not self.warnings:
3347 return
3348 print
3349 print 'WARNINGS:'
3350 for w in set(self.warnings):
3351 print ' !!! %s' % w
  def _FindOtherRegressions(self, revision_data_sorted, bad_greater_than_good):
    """Scans the bisect results for additional suspected regression ranges.

    Args:
      revision_data_sorted: List of (revision id, data) tuples in commit order.
      bad_greater_than_good: Whether the overall regression raised the metric
          (bad mean > good mean); a candidate range is only reported when it
          moves the metric in the same direction.

    Returns:
      A list of [current_id, previous_id, confidence] entries, one per
      candidate range whose confidence exceeds 50.
    """
    other_regressions = []
    # previous_values accumulates the value lists of all earlier measured
    # revisions; previous_id is the most recent measured revision.
    previous_values = []
    previous_id = None
    for current_id, current_data in revision_data_sorted:
      current_values = current_data['value']
      if current_values:
        current_values = current_values['values']
        if previous_values:
          confidence = CalculateConfidence(previous_values, [current_values])
          mean_of_prev_runs = CalculateMean(sum(previous_values, []))
          mean_of_current_runs = CalculateMean(current_values)

          # Check that the potential regression is in the same direction as
          # the overall regression. If the mean of the previous runs < the
          # mean of the current runs, this local regression is in same
          # direction.
          prev_less_than_current = mean_of_prev_runs < mean_of_current_runs
          is_same_direction = (prev_less_than_current if
              bad_greater_than_good else not prev_less_than_current)

          # Only report potential regressions with high confidence.
          if is_same_direction and confidence > 50:
            other_regressions.append([current_id, previous_id, confidence])
        previous_values.append(current_values)
        previous_id = current_id
    return other_regressions
  def _GetResultsDict(self, revision_data, revision_data_sorted):
    """Aggregates the bisect run into a single summary dict.

    Args:
      revision_data: Dict mapping revision ids to their result data.
      revision_data_sorted: List of (revision id, data) tuples in commit order.

    Returns:
      A dict with the suspected revision range, culprit revision info,
      regression size/std-err estimates, other candidate regressions and a
      confidence score.
    """
    # Find range where it possibly broke.
    first_working_revision = None
    first_working_revision_index = -1
    last_broken_revision = None
    last_broken_revision_index = -1

    for i in xrange(len(revision_data_sorted)):
      k, v = revision_data_sorted[i]
      if v['passed'] == 1:
        if not first_working_revision:
          first_working_revision = k
          first_working_revision_index = i

      if not v['passed']:
        last_broken_revision = k
        last_broken_revision_index = i

    # NOTE(review): if either boundary was not found, execution falls through
    # and this method implicitly returns None -- callers appear to assume a
    # completed bisect with both a failing and a passing revision. Confirm.
    if last_broken_revision != None and first_working_revision != None:
      # Collect the raw measured values on each side of the breakage point so
      # the regression size and confidence can be estimated.
      broken_means = []
      for i in xrange(0, last_broken_revision_index + 1):
        if revision_data_sorted[i][1]['value']:
          broken_means.append(revision_data_sorted[i][1]['value']['values'])

      working_means = []
      for i in xrange(first_working_revision_index, len(revision_data_sorted)):
        if revision_data_sorted[i][1]['value']:
          working_means.append(revision_data_sorted[i][1]['value']['values'])

      # Flatten the lists to calculate mean of all values.
      working_mean = sum(working_means, [])
      broken_mean = sum(broken_means, [])

      # Calculate the approximate size of the regression
      mean_of_bad_runs = CalculateMean(broken_mean)
      mean_of_good_runs = CalculateMean(working_mean)

      regression_size = 100 * CalculateRelativeChange(mean_of_good_runs,
                                                      mean_of_bad_runs)
      # NaN means the metric went from zero to nonzero (relative change is
      # undefined); report it descriptively instead of as a number.
      if math.isnan(regression_size):
        regression_size = 'zero-to-nonzero'

      regression_std_err = math.fabs(CalculatePooledStandardError(
          [working_mean, broken_mean]) /
          max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0

      # Give a "confidence" in the bisect. At the moment we use how distinct the
      # values are before and after the last broken revision, and how noisy the
      # overall graph is.
      confidence = CalculateConfidence(working_means, broken_means)

      culprit_revisions = []

      cwd = os.getcwd()
      self.ChangeToDepotWorkingDirectory(
          revision_data[last_broken_revision]['depot'])

      if revision_data[last_broken_revision]['depot'] == 'cros':
        # Want to get a list of all the commits and what depots they belong
        # to so that we can grab info about each.
        cmd = ['repo', 'forall', '-c',
            'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
            last_broken_revision, first_working_revision + 1)]
        (output, return_code) = RunProcessAndRetrieveOutput(cmd)

        changes = []
        assert not return_code, 'An error occurred while running'\
                                ' "%s"' % ' '.join(cmd)
        last_depot = None
        cwd = os.getcwd()
        for l in output.split('\n'):
          if l:
            # Output will be in form:
            # /path_to_depot
            # /path_to_other_depot
            # <SHA1>
            # /path_again
            # <SHA1>
            # etc.
            if l[0] == '/':
              last_depot = l
            else:
              contents = l.split(' ')
              if len(contents) > 1:
                changes.append([last_depot, contents[0]])
        for c in changes:
          os.chdir(c[0])
          info = self.source_control.QueryRevisionInfo(c[1])
          culprit_revisions.append((c[1], info, None))
      else:
        # Walk from the last broken revision up to (but excluding) the first
        # working one; each of those commits is a potential culprit.
        for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
          k, v = revision_data_sorted[i]
          if k == first_working_revision:
            break
          self.ChangeToDepotWorkingDirectory(v['depot'])
          info = self.source_control.QueryRevisionInfo(k)
          culprit_revisions.append((k, info, v['depot']))
      os.chdir(cwd)

      # Check for any other possible regression ranges
      other_regressions = self._FindOtherRegressions(revision_data_sorted,
          mean_of_bad_runs > mean_of_good_runs)

      # Check for warnings:
      if len(culprit_revisions) > 1:
        self.warnings.append('Due to build errors, regression range could '
                             'not be narrowed down to a single commit.')
      if self.opts.repeat_test_count == 1:
        self.warnings.append('Tests were only set to run once. This may '
                             'be insufficient to get meaningful results.')
      if confidence < 100:
        if confidence:
          self.warnings.append(
              'Confidence is less than 100%. There could be other candidates for '
              'this regression. Try bisecting again with increased repeat_count '
              'or on a sub-metric that shows the regression more clearly.')
        else:
          self.warnings.append(
              'Confidence is 0%. Try bisecting again on another platform, with '
              'increased repeat_count or on a sub-metric that shows the regression '
              'more clearly.')

      return {
          'first_working_revision': first_working_revision,
          'last_broken_revision': last_broken_revision,
          'culprit_revisions': culprit_revisions,
          'other_regressions': other_regressions,
          'regression_size': regression_size,
          'regression_std_err': regression_std_err,
          'confidence': confidence,
          }
  def FormatAndPrintResults(self, bisect_results):
    """Prints the results from a bisection run in a readable format.

    Args:
      bisect_results: The results from a bisection test run, as returned by
          Run(); must contain a 'revision_data' dict.
    """
    revision_data = bisect_results['revision_data']
    revision_data_sorted = sorted(revision_data.iteritems(),
                                  key = lambda x: x[1]['sort'])
    results_dict = self._GetResultsDict(revision_data, revision_data_sorted)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')

    print
    print 'Full results of bisection:'
    for current_id, current_data in revision_data_sorted:
      build_status = current_data['passed']

      # Translate boolean pass/fail into the human-readable labels; skipped
      # and build-failure states are already strings.
      if type(build_status) is bool:
        if build_status:
          build_status = 'Good'
        else:
          build_status = 'Bad'

      print ' %20s %40s %s' % (current_data['depot'],
          current_id, build_status)
    print

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')

    if results_dict['culprit_revisions'] and results_dict['confidence']:
      self._PrintBanner(results_dict)
      for culprit in results_dict['culprit_revisions']:
        cl, info, depot = culprit
        self._PrintRevisionInfo(cl, info, depot)
      self._PrintReproSteps()
      if results_dict['other_regressions']:
        self._PrintOtherRegressions(results_dict['other_regressions'],
                                    revision_data)
    else:
      self._PrintFailedBanner(results_dict)
      self._PrintReproSteps()

    self._PrintTestedCommitsTable(revision_data_sorted,
                                  results_dict['first_working_revision'],
                                  results_dict['last_broken_revision'],
                                  results_dict['confidence'])
    self._PrintStepTime(revision_data_sorted)
    self._PrintWarnings()

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
def DetermineAndCreateSourceControl(opts):
  """Attempts to determine the underlying source control workflow and returns
  a SourceControl object.

  Returns:
    An instance of a SourceControl object, or None if the current workflow
    is unsupported.
  """
  # Only git checkouts are supported; probe the working tree to find out.
  output, _ = RunGit(['rev-parse', '--is-inside-work-tree'])
  inside_git_work_tree = output.strip() == 'true'
  if inside_git_work_tree:
    return GitSourceControl(opts)
  return None
def IsPlatformSupported(opts):
  """Checks that this platform and build system are supported.

  Args:
    opts: The options parsed from the command line.

  Returns:
    True if the platform and build system are supported.
  """
  # Haven't tested the script out on any other platforms yet.
  return os.name in ('posix', 'nt')
3604 def RmTreeAndMkDir(path_to_dir, skip_makedir=False):
3605 """Removes the directory tree specified, and then creates an empty
3606 directory in the same location (if not specified to skip).
3608 Args:
3609 path_to_dir: Path to the directory tree.
3610 skip_makedir: Whether to skip creating empty directory, default is False.
3612 Returns:
3613 True if successful, False if an error occurred.
3615 try:
3616 if os.path.exists(path_to_dir):
3617 shutil.rmtree(path_to_dir)
3618 except OSError, e:
3619 if e.errno != errno.ENOENT:
3620 return False
3622 if not skip_makedir:
3623 return MaybeMakeDirectory(path_to_dir)
3625 return True
def RemoveBuildFiles(build_type):
  """Removes build files from previous runs."""
  # 'and' preserves the original short-circuit: 'build' is only cleared when
  # clearing 'out' succeeded.
  out_cleared = RmTreeAndMkDir(os.path.join('out', build_type))
  return out_cleared and RmTreeAndMkDir(os.path.join('build', build_type))
3636 class BisectOptions(object):
3637 """Options to be used when running bisection."""
3638 def __init__(self):
3639 super(BisectOptions, self).__init__()
3641 self.target_platform = 'chromium'
3642 self.build_preference = None
3643 self.good_revision = None
3644 self.bad_revision = None
3645 self.use_goma = None
3646 self.cros_board = None
3647 self.cros_remote_ip = None
3648 self.repeat_test_count = 20
3649 self.truncate_percent = 25
3650 self.max_time_minutes = 20
3651 self.metric = None
3652 self.command = None
3653 self.output_buildbot_annotations = None
3654 self.no_custom_deps = False
3655 self.working_directory = None
3656 self.extra_src = None
3657 self.debug_ignore_build = None
3658 self.debug_ignore_sync = None
3659 self.debug_ignore_perf_test = None
3660 self.gs_bucket = None
3661 self.target_arch = 'ia32'
3662 self.target_build_type = 'Release'
3663 self.builder_host = None
3664 self.builder_port = None
3665 self.bisect_mode = BISECT_MODE_MEAN
3667 def _CreateCommandLineParser(self):
3668 """Creates a parser with bisect options.
3670 Returns:
3671 An instance of optparse.OptionParser.
3673 usage = ('%prog [options] [-- chromium-options]\n'
3674 'Perform binary search on revision history to find a minimal '
3675 'range of revisions where a peformance metric regressed.\n')
3677 parser = optparse.OptionParser(usage=usage)
3679 group = optparse.OptionGroup(parser, 'Bisect options')
3680 group.add_option('-c', '--command',
3681 type='str',
3682 help='A command to execute your performance test at' +
3683 ' each point in the bisection.')
3684 group.add_option('-b', '--bad_revision',
3685 type='str',
3686 help='A bad revision to start bisection. ' +
3687 'Must be later than good revision. May be either a git' +
3688 ' or svn revision.')
3689 group.add_option('-g', '--good_revision',
3690 type='str',
3691 help='A revision to start bisection where performance' +
3692 ' test is known to pass. Must be earlier than the ' +
3693 'bad revision. May be either a git or svn revision.')
3694 group.add_option('-m', '--metric',
3695 type='str',
3696 help='The desired metric to bisect on. For example ' +
3697 '"vm_rss_final_b/vm_rss_f_b"')
3698 group.add_option('-r', '--repeat_test_count',
3699 type='int',
3700 default=20,
3701 help='The number of times to repeat the performance '
3702 'test. Values will be clamped to range [1, 100]. '
3703 'Default value is 20.')
3704 group.add_option('--max_time_minutes',
3705 type='int',
3706 default=20,
3707 help='The maximum time (in minutes) to take running the '
3708 'performance tests. The script will run the performance '
3709 'tests according to --repeat_test_count, so long as it '
3710 'doesn\'t exceed --max_time_minutes. Values will be '
3711 'clamped to range [1, 60].'
3712 'Default value is 20.')
3713 group.add_option('-t', '--truncate_percent',
3714 type='int',
3715 default=25,
3716 help='The highest/lowest % are discarded to form a '
3717 'truncated mean. Values will be clamped to range [0, '
3718 '25]. Default value is 25 (highest/lowest 25% will be '
3719 'discarded).')
3720 group.add_option('--bisect_mode',
3721 type='choice',
3722 choices=[BISECT_MODE_MEAN, BISECT_MODE_STD_DEV,
3723 BISECT_MODE_RETURN_CODE],
3724 default=BISECT_MODE_MEAN,
3725 help='The bisect mode. Choices are to bisect on the '
3726 'difference in mean, std_dev, or return_code.')
3727 parser.add_option_group(group)
3729 group = optparse.OptionGroup(parser, 'Build options')
3730 group.add_option('-w', '--working_directory',
3731 type='str',
3732 help='Path to the working directory where the script '
3733 'will do an initial checkout of the chromium depot. The '
3734 'files will be placed in a subdirectory "bisect" under '
3735 'working_directory and that will be used to perform the '
3736 'bisection. This parameter is optional, if it is not '
3737 'supplied, the script will work from the current depot.')
3738 group.add_option('--build_preference',
3739 type='choice',
3740 choices=['msvs', 'ninja', 'make'],
3741 help='The preferred build system to use. On linux/mac '
3742 'the options are make/ninja. On Windows, the options '
3743 'are msvs/ninja.')
3744 group.add_option('--target_platform',
3745 type='choice',
3746 choices=['chromium', 'cros', 'android', 'android-chrome'],
3747 default='chromium',
3748 help='The target platform. Choices are "chromium" '
3749 '(current platform), "cros", or "android". If you '
3750 'specify something other than "chromium", you must be '
3751 'properly set up to build that platform.')
3752 group.add_option('--no_custom_deps',
3753 dest='no_custom_deps',
3754 action="store_true",
3755 default=False,
3756 help='Run the script with custom_deps or not.')
3757 group.add_option('--extra_src',
3758 type='str',
3759 help='Path to a script which can be used to modify '
3760 'the bisect script\'s behavior.')
3761 group.add_option('--cros_board',
3762 type='str',
3763 help='The cros board type to build.')
3764 group.add_option('--cros_remote_ip',
3765 type='str',
3766 help='The remote machine to image to.')
3767 group.add_option('--use_goma',
3768 action="store_true",
3769 help='Add a bunch of extra threads for goma.')
3770 group.add_option('--output_buildbot_annotations',
3771 action="store_true",
3772 help='Add extra annotation output for buildbot.')
3773 group.add_option('--gs_bucket',
3774 default='',
3775 dest='gs_bucket',
3776 type='str',
3777 help=('Name of Google Storage bucket to upload or '
3778 'download build. e.g., chrome-perf'))
3779 group.add_option('--target_arch',
3780 type='choice',
3781 choices=['ia32', 'x64', 'arm'],
3782 default='ia32',
3783 dest='target_arch',
3784 help=('The target build architecture. Choices are "ia32" '
3785 '(default), "x64" or "arm".'))
3786 group.add_option('--target_build_type',
3787 type='choice',
3788 choices=['Release', 'Debug'],
3789 default='Release',
3790 help='The target build type. Choices are "Release" '
3791 '(default), or "Debug".')
3792 group.add_option('--builder_host',
3793 dest='builder_host',
3794 type='str',
3795 help=('Host address of server to produce build by posting'
3796 ' try job request.'))
3797 group.add_option('--builder_port',
3798 dest='builder_port',
3799 type='int',
3800 help=('HTTP port of the server to produce build by posting'
3801 ' try job request.'))
3802 parser.add_option_group(group)
3804 group = optparse.OptionGroup(parser, 'Debug options')
3805 group.add_option('--debug_ignore_build',
3806 action="store_true",
3807 help='DEBUG: Don\'t perform builds.')
3808 group.add_option('--debug_ignore_sync',
3809 action="store_true",
3810 help='DEBUG: Don\'t perform syncs.')
3811 group.add_option('--debug_ignore_perf_test',
3812 action="store_true",
3813 help='DEBUG: Don\'t perform performance tests.')
3814 parser.add_option_group(group)
3815 return parser
3817 def ParseCommandLine(self):
3818 """Parses the command line for bisect options."""
3819 parser = self._CreateCommandLineParser()
3820 (opts, _) = parser.parse_args()
3822 try:
3823 if not opts.command:
3824 raise RuntimeError('missing required parameter: --command')
3826 if not opts.good_revision:
3827 raise RuntimeError('missing required parameter: --good_revision')
3829 if not opts.bad_revision:
3830 raise RuntimeError('missing required parameter: --bad_revision')
3832 if not opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE:
3833 raise RuntimeError('missing required parameter: --metric')
3835 if opts.gs_bucket:
3836 if not cloud_storage.List(opts.gs_bucket):
3837 raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket)
3838 if not opts.builder_host:
3839 raise RuntimeError('Must specify try server hostname, when '
3840 'gs_bucket is used: --builder_host')
3841 if not opts.builder_port:
3842 raise RuntimeError('Must specify try server port number, when '
3843 'gs_bucket is used: --builder_port')
3844 if opts.target_platform == 'cros':
3845 # Run sudo up front to make sure credentials are cached for later.
3846 print 'Sudo is required to build cros:'
3847 print
3848 RunProcess(['sudo', 'true'])
3850 if not opts.cros_board:
3851 raise RuntimeError('missing required parameter: --cros_board')
3853 if not opts.cros_remote_ip:
3854 raise RuntimeError('missing required parameter: --cros_remote_ip')
3856 if not opts.working_directory:
3857 raise RuntimeError('missing required parameter: --working_directory')
3859 metric_values = opts.metric.split('/')
3860 if (len(metric_values) != 2 and
3861 opts.bisect_mode != BISECT_MODE_RETURN_CODE):
3862 raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)
3864 opts.metric = metric_values
3865 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3866 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3867 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3868 opts.truncate_percent = opts.truncate_percent / 100.0
3870 for k, v in opts.__dict__.iteritems():
3871 assert hasattr(self, k), "Invalid %s attribute in BisectOptions." % k
3872 setattr(self, k, v)
3873 except RuntimeError, e:
3874 output_string = StringIO.StringIO()
3875 parser.print_help(file=output_string)
3876 error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
3877 output_string.close()
3878 raise RuntimeError(error_message)
3880 @staticmethod
3881 def FromDict(values):
3882 """Creates an instance of BisectOptions with the values parsed from a
3883 .cfg file.
3885 Args:
3886 values: a dict containing options to set.
3888 Returns:
3889 An instance of BisectOptions.
3891 opts = BisectOptions()
3892 for k, v in values.iteritems():
3893 assert hasattr(opts, k), 'Invalid %s attribute in '\
3894 'BisectOptions.' % k
3895 setattr(opts, k, v)
3897 metric_values = opts.metric.split('/')
3898 if len(metric_values) != 2:
3899 raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)
3901 opts.metric = metric_values
3902 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3903 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3904 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3905 opts.truncate_percent = opts.truncate_percent / 100.0
3907 return opts
def main():
  """Entry point: parses options, sets up the checkout, and runs the bisect.

  Returns:
    0 on a successful bisect run, 1 on any RuntimeError (with the error
    printed, wrapped in buildbot annotations when requested).
  """

  try:
    opts = BisectOptions()
    opts.ParseCommandLine()

    # An extra_src script may register additional depots to bisect into.
    if opts.extra_src:
      extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
      if not extra_src:
        raise RuntimeError("Invalid or missing --extra_src.")
      _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())

    # With a working directory, check out a fresh depot there and work from
    # its src/ subdirectory; otherwise operate on the current depot.
    if opts.working_directory:
      custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
      if opts.no_custom_deps:
        custom_deps = None
      bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)

      os.chdir(os.path.join(os.getcwd(), 'src'))

    # Stale build output could skew results, so clear it before starting.
    if not RemoveBuildFiles(opts.target_build_type):
      raise RuntimeError('Something went wrong removing the build files.')

    if not IsPlatformSupported(opts):
      raise RuntimeError("Sorry, this platform isn't supported yet.")

    # Check what source control method they're using. Only support git workflow
    # at the moment.
    source_control = DetermineAndCreateSourceControl(opts)

    if not source_control:
      raise RuntimeError("Sorry, only the git workflow is supported at the "
                         "moment.")

    # gClient sync seems to fail if you're not in master branch.
    if (not source_control.IsInProperBranch() and
        not opts.debug_ignore_sync and
        not opts.working_directory):
      raise RuntimeError("You must switch to master branch to run bisection.")
    bisect_test = BisectPerformanceMetrics(source_control, opts)
    try:
      bisect_results = bisect_test.Run(opts.command,
                                       opts.bad_revision,
                                       opts.good_revision,
                                       opts.metric)
      if bisect_results['error']:
        raise RuntimeError(bisect_results['error'])
      bisect_test.FormatAndPrintResults(bisect_results)
      return 0
    finally:
      # Always clean up (e.g. restore the depot state) even on failure.
      bisect_test.PerformCleanup()
  except RuntimeError, e:
    if opts.output_buildbot_annotations:
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')
    print 'Error: %s' % e.message
    if opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
    return 1
# Script entry point: exit with main()'s status code (0 success, 1 failure).
if __name__ == '__main__':
  sys.exit(main())