DevTools: optionally report buffer usage via Tracing agent
[chromium-blink-merge.git] / tools / bisect-perf-regression.py
blob56892cd4413d5f410366c8f52cc711fbbd69751d
1 #!/usr/bin/env python
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
13 further bisect changes to those depots and attempt to narrow down the revision
14 range.
17 An example usage (using svn cl's):
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
28 An example usage (using git hashes):
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
36 """
38 import copy
39 import datetime
40 import errno
41 import hashlib
42 import math
43 import optparse
44 import os
45 import re
46 import shlex
47 import shutil
48 import StringIO
49 import subprocess
50 import sys
51 import time
52 import zipfile
54 sys.path.append(os.path.join(os.path.dirname(__file__), 'telemetry'))
56 import bisect_utils
57 import post_perf_builder_job
58 from telemetry.page import cloud_storage
# The additional repositories that might need to be bisected.
# If the repository has any dependant repositories (such as skia/src needs
# skia/include and skia/gyp to be updated), specify them in the 'depends'
# so that they're synced appropriately.
# Format is:
# src: path to the working directory.
# recurse: True if this repositry will get bisected.
# depends: A list of other repositories that are actually part of the same
#   repository in svn.
# svn: Needed for git workflow to resolve hashes to svn revisions.
# from: Parent depot that must be bisected before this is bisected.
# deps_var: Key name in vars varible in DEPS file that has revision information.
DEPOT_DEPS_NAME = {
  'chromium' : {
    "src" : "src",
    "recurse" : True,
    "depends" : None,
    "from" : ['cros', 'android-chrome'],
    'viewvc': 'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
    'deps_var': None
  },
  'webkit' : {
    "src" : "src/third_party/WebKit",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'http://src.chromium.org/viewvc/blink?view=revision&revision=',
    'deps_var': 'webkit_revision'
  },
  'angle' : {
    "src" : "src/third_party/angle",
    "src_old" : "src/third_party/angle_dx11",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    "platform": 'nt',
    'deps_var': 'angle_revision'
  },
  'v8' : {
    "src" : "src/v8",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    "custom_deps": bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'v8_bleeding_edge' : {
    "src" : "src/v8_bleeding_edge",
    "recurse" : True,
    "depends" : None,
    "svn": "https://v8.googlecode.com/svn/branches/bleeding_edge",
    "from" : ['v8'],
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'skia/src' : {
    "src" : "src/third_party/skia/src",
    "recurse" : True,
    "svn" : "http://skia.googlecode.com/svn/trunk/src",
    "depends" : ['skia/include', 'skia/gyp'],
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'skia_revision'
  },
  'skia/include' : {
    "src" : "src/third_party/skia/include",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/include",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  },
  'skia/gyp' : {
    "src" : "src/third_party/skia/gyp",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/gyp",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  }
}

DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
# Path to the cros_sdk wrapper, relative to the chromium 'src' directory.
CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
# git-log --grep pattern used to map a ChromeOS version string to the commit
# that bumped the version (see ResolveToRevision's cros branch).
CROS_VERSION_PATTERN = 'new version number from %s'
CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
# SSH test keys needed to talk to a ChromeOS test device; both copies are
# chmod'ed before use (see CrosBuilder.ImageToTarget).
CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
                                  'testing_rsa')
CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
                                    'mod_for_test_scripts', 'ssh_keys',
                                    'testing_rsa')

# Result codes for a build/download attempt.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2
# Maximum time in seconds to wait after posting build request to tryserver.
# TODO: Change these values based on the actual time taken by buildbots on
# the tryserver.
MAX_MAC_BUILD_TIME = 7200
MAX_WIN_BUILD_TIME = 7200
MAX_LINUX_BUILD_TIME = 7200

# Patch template to add a new file, DEPS.sha under src folder.
# This file contains SHA1 value of the DEPS changes made while bisecting
# dependency repositories. This patch is sent along with the DEPS patch to the
# tryserver. When a build request is posted with a patch, bisect builders on
# the tryserver read the SHA value from this file once the build is produced,
# and append it to the build archive filename.
DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha
new file mode 100644
--- /dev/null
+++ src/DEPS.sha
@@ -0,0 +1 @@
+%(deps_sha)s
"""
def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables."""
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  # Build a fresh merged dict and rebind the globals, so any prior dict
  # object is left untouched.
  merged_depots = dict(DEPOT_DEPS_NAME)
  merged_depots.update(depot_info)
  DEPOT_DEPS_NAME = merged_depots
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
def CalculateTruncatedMean(data_set, truncate_percent):
  """Calculates the truncated mean of a set of values.

  Note that this isn't just the mean of the set of values with the highest
  and lowest values discarded; the non-discarded values are also weighted
  differently depending how many values are discarded.

  Args:
    data_set: Non-empty list of values.
    truncate_percent: The % from the upper and lower portions of the data set
        to discard, expressed as a value in [0, 1].

  Returns:
    The truncated mean as a float.

  Raises:
    TypeError: The data set was empty after discarding values.
  """
  if len(data_set) > 2:
    data_set = sorted(data_set)

    discard_num_float = len(data_set) * truncate_percent
    discard_num_int = int(math.floor(discard_num_float))
    kept_weight = len(data_set) - discard_num_float * 2

    data_set = data_set[discard_num_int:len(data_set)-discard_num_int]

    weight_left = 1.0 - (discard_num_float - discard_num_int)

    if weight_left < 1:
      # If the % to discard leaves a fractional portion, need to weight the
      # two boundary values by the fraction that is kept.
      unweighted_vals = data_set[1:len(data_set)-1]
      weighted_vals = [data_set[0], data_set[-1]]
      weighted_vals = [w * weight_left for w in weighted_vals]
      data_set = weighted_vals + unweighted_vals
  else:
    kept_weight = len(data_set)

  if not data_set:
    # Preserves the documented contract of the original reduce()-based
    # implementation, which raised TypeError on an empty sequence.
    raise TypeError('The data set was empty after discarding values.')

  return sum(float(x) for x in data_set) / kept_weight
def CalculateMean(values):
  """Returns the arithmetic mean of |values| (truncated mean with 0% cut)."""
  return CalculateTruncatedMean(values, truncate_percent=0.0)
def CalculateConfidence(good_results_lists, bad_results_lists):
  """Calculates a confidence percentage.

  This is calculated based on how distinct the "good" and "bad" values are,
  and how noisy the results are. More precisely, the confidence is the quotient
  of the difference between the closest values across the good and bad groups
  and the sum of the standard deviations of the good and bad groups.

  TODO(qyearsley): Replace this confidence function with a function that
  uses a Student's t-test. The confidence would be (1 - p-value), where
  p-value is the probability of obtaining the given set of good and bad
  values just by chance.

  Args:
    good_results_lists: A list of lists of "good" result numbers.
    bad_results_lists: A list of lists of "bad" result numbers.

  Returns:
    A number in the range [0, 100].
  """
  # Get the distance between the two groups. List comprehensions (rather
  # than map objects) so min() and max() can both consume the sequences.
  means_good = [CalculateMean(values) for values in good_results_lists]
  means_bad = [CalculateMean(values) for values in bad_results_lists]
  bounds_good = (min(means_good), max(means_good))
  bounds_bad = (min(means_bad), max(means_bad))
  dist_between_groups = min(
      math.fabs(bounds_bad[1] - bounds_good[0]),
      math.fabs(bounds_bad[0] - bounds_good[1]))

  # Get the sum of the standard deviations of the two groups.
  good_results_flattened = sum(good_results_lists, [])
  bad_results_flattened = sum(bad_results_lists, [])
  stddev_good = CalculateStandardDeviation(good_results_flattened)
  stddev_bad = CalculateStandardDeviation(bad_results_flattened)
  stddev_sum = stddev_good + stddev_bad

  # Guard against division by zero when both groups are perfectly stable.
  confidence = dist_between_groups / (max(0.0001, stddev_sum))
  confidence = int(min(1.0, max(confidence, 0.0)) * 100.0)
  return confidence
def CalculateStandardDeviation(values):
  """Calculates the sample standard deviation of the given list of values.

  Args:
    values: A list of numbers.

  Returns:
    The sample standard deviation as a float; 0.0 for lists with fewer than
    two values, where a sample standard deviation is undefined. (The empty
    case matches CalculateStandardError's handling.)
  """
  if len(values) <= 1:
    return 0.0

  mean = sum(float(x) for x in values) / len(values)
  squared_differences = [(float(x) - mean) ** 2 for x in values]
  # Sample (n-1) variance, not population variance.
  variance = sum(squared_differences) / (len(values) - 1)
  return math.sqrt(variance)
def CalculatePooledStandardError(work_sets):
  """Calculates the pooled sample standard error for a group of samples.

  Args:
    work_sets: An iterable of non-empty lists of numbers.

  Returns:
    The pooled standard error as a float, or 0.0 when no set contributes
    any degrees of freedom (i.e. every set has at most one value).
  """
  numerator = 0.0
  denominator1 = 0.0
  denominator2 = 0.0

  for current_set in work_sets:
    std_dev = CalculateStandardDeviation(current_set)
    # Weight each set's variance by its degrees of freedom (n - 1).
    numerator += (len(current_set) - 1) * std_dev ** 2
    denominator1 += len(current_set) - 1
    denominator2 += 1.0 / len(current_set)

  if denominator1:
    return math.sqrt(numerator / denominator1) * math.sqrt(denominator2)
  return 0.0
def CalculateStandardError(values):
  """Calculates the standard error of a list of values."""
  if len(values) <= 1:
    return 0.0
  # Standard error of the mean: sample stddev scaled by sqrt(n).
  return CalculateStandardDeviation(values) / math.sqrt(len(values))
def IsStringFloat(string_to_check):
  """Checks whether or not the given string can be converted to a floating
  point number.

  Args:
    string_to_check: Input string to check if it can be converted to a float.

  Returns:
    True if the string can be converted to a float.
  """
  try:
    float(string_to_check)

    return True
  except ValueError:
    return False
def IsStringInt(string_to_check):
  """Checks whether or not the given string can be converted to a integer.

  Args:
    string_to_check: Input string to check if it can be converted to an int.

  Returns:
    True if the string can be converted to an int.
  """
  try:
    int(string_to_check)

    return True
  except ValueError:
    return False
def IsWindows():
  """Checks whether or not the script is running on Windows.

  Returns:
    True if running on Windows.
  """
  return sys.platform == 'cygwin' or sys.platform.startswith('win')
def Is64BitWindows():
  """Returns whether or not Windows is a 64-bit version.

  Returns:
    True if Windows is 64-bit, False if 32-bit (or the architecture
    environment variables are not set at all).
  """
  # When running 32-bit Python under WoW64, PROCESSOR_ARCHITECTURE reports
  # the emulated architecture; PROCESSOR_ARCHITEW6432 holds the real one.
  # Use .get() so a missing PROCESSOR_ARCHITECTURE doesn't raise KeyError.
  arch = os.environ.get('PROCESSOR_ARCHITEW6432',
                        os.environ.get('PROCESSOR_ARCHITECTURE'))
  return arch in ['AMD64', 'I64']
def IsLinux():
  """Checks whether or not the script is running on Linux.

  Returns:
    True if running on Linux.
  """
  return sys.platform.startswith('linux')
def IsMac():
  """Checks whether or not the script is running on Mac.

  Returns:
    True if running on Mac.
  """
  return sys.platform.startswith('darwin')
def GetSHA1HexDigest(contents):
  """Returns the SHA1 hash of |contents| as a hexadecimal string."""
  hasher = hashlib.sha1(contents)
  return hasher.hexdigest()
def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None):
  """Gets the archive file name for the given revision.

  Args:
    build_revision: Revision to embed in the name; None yields the bare
        platform base name with no revision or extension.
    target_arch: Target architecture, e.g. 'ia32' or 'x64'. Currently unused
        because Windows x64 archives share the 'win32' suffix; kept for
        interface compatibility with callers.
    patch_sha: Optional SHA of an applied patch, appended to the revision.

  Returns:
    The archive file name, e.g. 'full-build-linux_12345.zip'.
  """
  def PlatformName():
    """Return a string to be used in paths for the platform."""
    if IsWindows():
      # Build archives for x64 are still stored with the 'win32' suffix
      # (chromium_utils.PlatformName()), so both architectures map to it.
      return 'win32'
    if IsLinux():
      return 'linux'
    if IsMac():
      return 'mac'
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)

  base_name = 'full-build-%s' % PlatformName()
  if not build_revision:
    return base_name
  if patch_sha:
    build_revision = '%s_%s' % (build_revision, patch_sha)
  return '%s_%s.zip' % (base_name, build_revision)
def GetRemoteBuildPath(build_revision, target_arch='ia32', patch_sha=None):
  """Compute the url to download the build from."""
  def GetGSRootFolderName():
    """Gets Google Cloud Storage root folder names"""
    if IsWindows():
      if Is64BitWindows() and target_arch == 'x64':
        return 'Win x64 Builder'
      return 'Win Builder'
    if IsLinux():
      return 'Linux Builder'
    if IsMac():
      return 'Mac Builder'
    raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

  # <builder folder>/<archive file name>
  return '%s/%s' % (GetGSRootFolderName(),
                    GetZipFileName(build_revision, target_arch, patch_sha))
def FetchFromCloudStorage(bucket_name, source_path, destination_path):
  """Fetches file(s) from the Google Cloud Storage.

  Args:
    bucket_name: Google Storage bucket name.
    source_path: Source file path.
    destination_path: Destination file path.

  Returns:
    True if the fetching succeeds, otherwise False.
  """
  target_file = os.path.join(destination_path, os.path.basename(source_path))
  try:
    if cloud_storage.Exists(bucket_name, source_path):
      # NOTE(review): message prints 'gs//' -- probably meant 'gs://'.
      print 'Fetching file from gs//%s/%s ...' % (bucket_name, source_path)
      cloud_storage.Get(bucket_name, source_path, destination_path)
      # Only report success once the file is actually on disk.
      if os.path.exists(target_file):
        return True
    else:
      print ('File gs://%s/%s not found in cloud storage.' % (
          bucket_name, source_path))
  except Exception as e:
    # Best-effort download: any failure (network, permissions, ...) is
    # reported and treated as a miss rather than aborting the bisect.
    print 'Something went wrong while fetching file from cloud: %s' % e
    if os.path.exists(target_file):
      # Remove a possibly partial download so later checks don't see it.
      os.remove(target_file)
  return False
474 # This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
  """Creates an entire path, if it doesn't already exist.

  Args:
    *path: Path components, joined with os.path.join.

  Returns:
    True if the directory exists afterwards (created here or already
    present), False if creation failed for any other reason.
  """
  file_path = os.path.join(*path)
  try:
    os.makedirs(file_path)
  except OSError as e:
    # An already-existing directory is fine; anything else is a failure.
    if e.errno != errno.EEXIST:
      return False
  return True
486 # This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def ExtractZip(filename, output_dir, verbose=True):
  """ Extract the zip archive in the output directory."""
  MaybeMakeDirectory(output_dir)

  # On Linux and Mac, we use the unzip command as it will
  # handle links and file bits (executable), which is much
  # easier than trying to do that with ZipInfo options.
  #
  # On Windows, try to use 7z if it is installed, otherwise fall back to python
  # zip module and pray we don't have files larger than 512MB to unzip.
  unzip_cmd = None
  if IsMac() or IsLinux():
    unzip_cmd = ['unzip', '-o']
  elif IsWindows() and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe'):
    unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']

  if unzip_cmd:
    # Make sure path is absolute before changing directories.
    filepath = os.path.abspath(filename)
    saved_dir = os.getcwd()
    os.chdir(output_dir)
    command = unzip_cmd + [filepath]
    result = RunProcess(command)
    os.chdir(saved_dir)
    if result:
      raise IOError('unzip failed: %s => %s' % (str(command), result))
  else:
    # No external tool available: extract with the (slower) zipfile module.
    assert IsWindows()
    zf = zipfile.ZipFile(filename)
    for name in zf.namelist():
      if verbose:
        print 'Extracting %s' % name
      zf.extract(name, output_dir)
def RunProcess(command):
  """Run an arbitrary command. If output from the call is needed, use
  RunProcessAndRetrieveOutput instead.

  Args:
    command: A list containing the command and args to execute.

  Returns:
    The return code of the call.
  """
  # On Windows, use shell=True to get PATH interpretation.
  shell = IsWindows()
  return subprocess.call(command, shell=shell)
def RunProcessAndRetrieveOutput(command, cwd=None):
  """Run an arbitrary command, returning its output and return code. Since
  output is collected via communicate(), there will be no output until the
  call terminates. If you need output while the program runs (ie. so
  that the buildbot doesn't terminate the script), consider RunProcess().

  Args:
    command: A list containing the command and args to execute.
    cwd: Directory to run the command from, or None for the current one.

  Returns:
    A tuple of the output and return code.
  """
  # On Windows, use shell=True to get PATH interpretation.
  shell = IsWindows()
  proc = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE, cwd=cwd)

  (output, _) = proc.communicate()

  return (output, proc.returncode)
def RunGit(command, cwd=None):
  """Run a git subcommand, returning its output and return code.

  Args:
    command: A list containing the args to git.
    cwd: Directory to run git from, or None for the current one.

  Returns:
    A tuple of the output and return code.
  """
  command = ['git'] + command

  return RunProcessAndRetrieveOutput(command, cwd=cwd)
def CheckRunGit(command, cwd=None):
  """Run a git subcommand, returning its output. Asserts if
  the return code of the call is non-zero.

  Args:
    command: A list containing the args to git.
    cwd: Directory to run git from, or None for the current one.

  Returns:
    The output of the git command.
  """
  (output, return_code) = RunGit(command, cwd=cwd)

  assert not return_code, 'An error occurred while running'\
                          ' "git %s"' % ' '.join(command)
  return output
def SetBuildSystemDefault(build_system):
  """Sets up any environment variables needed to build with the specified build
  system.

  Args:
    build_system: A string specifying build system. Currently only 'ninja' or
        'make' are supported.

  Raises:
    RuntimeError: The given build system is not supported.
  """
  if build_system == 'ninja':
    gyp_var = os.getenv('GYP_GENERATORS')

    if not gyp_var or 'ninja' not in gyp_var:
      if gyp_var:
        os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
      else:
        os.environ['GYP_GENERATORS'] = 'ninja'

      if IsWindows():
        os.environ['GYP_DEFINES'] = 'component=shared_library '\
            'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
            'chromium_win_pch=0'
  elif build_system == 'make':
    os.environ['GYP_GENERATORS'] = 'make'
  else:
    raise RuntimeError('%s build not supported.' % build_system)
def BuildWithMake(threads, targets):
  """Runs a Release-mode make build of |targets|; returns True on success."""
  cmd = ['make', 'BUILDTYPE=Release']

  if threads:
    cmd.append('-j%d' % threads)

  cmd.extend(targets)

  return RunProcess(cmd) == 0
def BuildWithNinja(threads, targets):
  """Runs a ninja build of |targets| in out/Release; returns True on success."""
  cmd = ['ninja', '-C', os.path.join('out', 'Release')]

  if threads:
    cmd.append('-j%d' % threads)

  cmd.extend(targets)

  return RunProcess(cmd) == 0
def BuildWithVisualStudio(targets):
  """Builds |targets| with devenv.com (VS 2010); returns True on success."""
  path_to_devenv = os.path.abspath(
      os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
  path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')

  cmd = [path_to_devenv, '/build', 'Release', path_to_sln]
  for target in targets:
    cmd.extend(['/Project', target])

  return RunProcess(cmd) == 0
def WriteStringToFile(text, file_name):
  """Writes |text| to the file at |file_name|, replacing any prior contents."""
  with open(file_name, "w") as f:
    f.write(text)
def ReadStringFromFile(file_name):
  """Returns the entire contents of the file at |file_name| as a string."""
  with open(file_name) as f:
    return f.read()
def ChangeBackslashToSlashInPatch(diff_text):
  """Formats file paths in the given patch text to unix-style paths.

  Only the '--- '/'+++ ' file header lines are rewritten; other lines may
  legitimately contain backslashes and are left untouched.

  Args:
    diff_text: The patch text, or a falsy value.

  Returns:
    The converted patch text, or None if |diff_text| is empty.
  """
  if not diff_text:
    return None
  diff_lines = diff_text.split('\n')
  for i, line in enumerate(diff_lines):
    if line.startswith(('--- ', '+++ ')):
      diff_lines[i] = line.replace('\\', '/')
  return '\n'.join(diff_lines)
class Builder(object):
  """Builder is used by the bisect script to build relevant targets and deploy.
  """
  def __init__(self, opts):
    """Performs setup for building with target build system.

    Args:
      opts: Options parsed from command line.

    Raises:
      RuntimeError: The build system or platform environment could not be
          set up.
    """
    if IsWindows():
      if not opts.build_preference:
        opts.build_preference = 'msvs'

      if opts.build_preference == 'msvs':
        if not os.getenv('VS100COMNTOOLS'):
          raise RuntimeError(
              'Path to visual studio could not be determined.')
      else:
        SetBuildSystemDefault(opts.build_preference)
    else:
      if not opts.build_preference:
        # GYP_GENERATORS may be unset; default to '' so the membership test
        # below can't raise TypeError on None.
        if 'ninja' in (os.getenv('GYP_GENERATORS') or ''):
          opts.build_preference = 'ninja'
        else:
          opts.build_preference = 'make'

      SetBuildSystemDefault(opts.build_preference)

    if not bisect_utils.SetupPlatformBuildEnvironment(opts):
      raise RuntimeError('Failed to set platform environment.')

    bisect_utils.RunGClient(['runhooks'])

  @staticmethod
  def FromOpts(opts):
    """Returns the Builder subclass matching opts.target_platform."""
    builder = None
    if opts.target_platform == 'cros':
      builder = CrosBuilder(opts)
    elif opts.target_platform == 'android':
      builder = AndroidBuilder(opts)
    elif opts.target_platform == 'android-chrome':
      builder = AndroidChromeBuilder(opts)
    else:
      builder = DesktopBuilder(opts)
    return builder

  def Build(self, depot, opts):
    """Builds the needed targets; implemented by subclasses."""
    raise NotImplementedError()

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    """Returns the build output directory; implemented by subclasses."""
    raise NotImplementedError()
class DesktopBuilder(Builder):
  """DesktopBuilder is used to build Chromium on linux/mac/windows."""
  def __init__(self, opts):
    super(DesktopBuilder, self).__init__(opts)

  def Build(self, depot, opts):
    """Builds chromium_builder_perf target using options passed into
    the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    targets = ['chromium_builder_perf']

    threads = None
    if opts.use_goma:
      # goma distributes compilation, so many more parallel jobs are safe.
      threads = 64

    build_success = False
    if opts.build_preference == 'make':
      build_success = BuildWithMake(threads, targets)
    elif opts.build_preference == 'ninja':
      build_success = BuildWithNinja(threads, targets)
    elif opts.build_preference == 'msvs':
      assert IsWindows(), 'msvs is only supported on Windows.'
      build_success = BuildWithVisualStudio(targets)
    else:
      assert False, 'No build system defined.'
    return build_success

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    """Returns the path to the build directory, relative to the checkout root.

    Assumes that the current working directory is the checkout root.
    """
    src_dir = src_dir or 'src'
    if opts.build_preference == 'ninja' or IsLinux():
      return os.path.join(src_dir, 'out')
    if IsMac():
      return os.path.join(src_dir, 'xcodebuild')
    if IsWindows():
      return os.path.join(src_dir, 'build')
    raise NotImplementedError('Unexpected platform %s' % sys.platform)
class AndroidBuilder(Builder):
  """AndroidBuilder is used to build on android."""
  def __init__(self, opts):
    super(AndroidBuilder, self).__init__(opts)

  def _GetTargets(self):
    # Targets needed to run perf tests on a device; subclasses extend this.
    return ['chrome_shell', 'cc_perftests_apk', 'android_tools']

  def Build(self, depot, opts):
    """Builds the android content shell and other necessary tools using options
    passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    threads = None
    if opts.use_goma:
      # goma distributes compilation, so many more parallel jobs are safe.
      threads = 64

    build_success = False
    if opts.build_preference == 'ninja':
      build_success = BuildWithNinja(threads, self._GetTargets())
    else:
      assert False, 'No build system defined.'

    return build_success
class AndroidChromeBuilder(AndroidBuilder):
  """AndroidChromeBuilder is used to build "android-chrome" on android."""
  def __init__(self, opts):
    super(AndroidChromeBuilder, self).__init__(opts)

  def _GetTargets(self):
    # Everything AndroidBuilder builds, plus the chrome apk itself.
    return AndroidBuilder._GetTargets(self) + ['chrome_apk']
class CrosBuilder(Builder):
  """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
  target platform."""
  def __init__(self, opts):
    super(CrosBuilder, self).__init__(opts)

  def ImageToTarget(self, opts):
    """Installs latest image to target specified by opts.cros_remote_ip.

    Args:
      opts: Program options containing cros_board and cros_remote_ip.

    Returns:
      True if successful.
    """
    try:
      # Keys will most likely be set to 0640 after wiping the chroot.
      os.chmod(CROS_SCRIPT_KEY_PATH, 0600)
      os.chmod(CROS_TEST_KEY_PATH, 0600)
      cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
             '--remote=%s' % opts.cros_remote_ip,
             '--board=%s' % opts.cros_board, '--test', '--verbose']

      return_code = RunProcess(cmd)
      return not return_code
    except OSError, e:
      # Failure to prep the keys or launch the tool is treated as a failed
      # imaging attempt.
      return False

  def BuildPackages(self, opts, depot):
    """Builds packages for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    if depot != 'cros':
      # Bisecting chrome on cros: point the sdk at the local chrome checkout.
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=Release', './build_packages',
            '--board=%s' % opts.cros_board]
    return_code = RunProcess(cmd)

    return not return_code

  def BuildImage(self, opts, depot):
    """Builds test image for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    if depot != 'cros':
      # Bisecting chrome on cros: point the sdk at the local chrome checkout.
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=Release', '--', './build_image',
            '--board=%s' % opts.cros_board, 'test']

    return_code = RunProcess(cmd)

    return not return_code

  def Build(self, depot, opts):
    """Builds targets using options passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    # Full pipeline: build packages, then the test image, then deploy it.
    if self.BuildPackages(opts, depot):
      if self.BuildImage(opts, depot):
        return self.ImageToTarget(opts)
    return False
class SourceControl(object):
  """SourceControl is an abstraction over the underlying source control
  system used for chromium. For now only git is supported, but in the
  future, the svn workflow could be added as well."""
  def __init__(self):
    super(SourceControl, self).__init__()

  def SyncToRevisionWithGClient(self, revision):
    """Uses gclient to sync to the specified revision.

    ie. gclient sync --revision <revision>

    Args:
      revision: The git SHA1 or svn CL (depending on workflow).

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunGClient(['sync', '--revision',
        revision, '--verbose', '--nohooks', '--reset', '--force'])

  def SyncToRevisionWithRepo(self, timestamp):
    """Uses repo to sync all the underlying git depots to the specified
    time.

    Args:
      timestamp: The unix timestamp to sync to.

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunRepoSyncAtTimestamp(timestamp)
class GitSourceControl(SourceControl):
  """GitSourceControl is used to query the underlying source control. """
  def __init__(self, opts):
    super(GitSourceControl, self).__init__()
    # Parsed command-line options, kept for later queries.
    self.opts = opts

  def IsGit(self):
    # This implementation is git-backed.
    return True
  def GetRevisionList(self, revision_range_end, revision_range_start, cwd=None):
    """Retrieves a list of revisions between |revision_range_start| and
    |revision_range_end|.

    Args:
      revision_range_end: The SHA1 for the end of the range.
      revision_range_start: The SHA1 for the beginning of the range.
      cwd: Directory to run git from, or None for the current one.

    Returns:
      A list of the revisions between |revision_range_start| and
      |revision_range_end| (inclusive).
    """
    revision_range = '%s..%s' % (revision_range_start, revision_range_end)
    # -10000 caps the walk; --first-parent keeps the list linear over merges.
    cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
    log_output = CheckRunGit(cmd, cwd=cwd)

    revision_hash_list = log_output.split()
    # git's A..B range excludes A itself, so add the start revision back to
    # make the result inclusive.
    revision_hash_list.append(revision_range_start)

    return revision_hash_list
  def SyncToRevision(self, revision, sync_client=None):
    """Syncs to the specified revision.

    Args:
      revision: The revision to sync to.
      sync_client: None to check out with git directly, or 'gclient'/'repo'
          to sync using that tool instead.

    Returns:
      True if successful.
    """

    if not sync_client:
      results = RunGit(['checkout', revision])[1]
    elif sync_client == 'gclient':
      results = self.SyncToRevisionWithGClient(revision)
    elif sync_client == 'repo':
      results = self.SyncToRevisionWithRepo(revision)
    # NOTE(review): any other sync_client value leaves |results| unbound and
    # raises NameError below -- confirm callers only pass the values above.

    return not results
  def ResolveToRevision(self, revision_to_check, depot, search, cwd=None):
    """If an SVN revision is supplied, try to resolve it to a git SHA1.

    Args:
      revision_to_check: The user supplied revision string that may need to be
          resolved to a git SHA1.
      depot: The depot the revision_to_check is from.
      search: The number of changelists to try if the first fails to resolve
          to a git hash. If the value is negative, the function will search
          backwards chronologically, otherwise it will search forward.
      cwd: Directory to run git from, or None for the current one.

    Returns:
      A string containing a git SHA1 hash, otherwise None.
    """
    # Android-chrome is git only, so no need to resolve this to anything else.
    if depot == 'android-chrome':
      return revision_to_check

    if depot != 'cros':
      if not IsStringInt(revision_to_check):
        # Already a git hash (or other non-numeric ref); nothing to resolve.
        return revision_to_check

      depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'

      if depot != 'chromium':
        depot_svn = DEPOT_DEPS_NAME[depot]['svn']

      svn_revision = int(revision_to_check)
      git_revision = None

      if search > 0:
        search_range = xrange(svn_revision, svn_revision + search, 1)
      else:
        search_range = xrange(svn_revision, svn_revision + search, -1)

      for i in search_range:
        # Look for the commit whose git-svn-id matches this svn revision.
        svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
        cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
               'origin/master']

        (log_output, return_code) = RunGit(cmd, cwd=cwd)

        assert not return_code, 'An error occurred while running'\
                                ' "git %s"' % ' '.join(cmd)

        if not return_code:
          log_output = log_output.strip()

          if log_output:
            git_revision = log_output

            break

      return git_revision
    else:
      # cros: revisions may be version strings; map them to the commit
      # timestamp of the matching version bump in chromiumos-overlay,
      # which is what repo sync uses as a "revision".
      if IsStringInt(revision_to_check):
        return int(revision_to_check)
      else:
        cwd = os.getcwd()
        os.chdir(os.path.join(os.getcwd(), 'src', 'third_party',
            'chromiumos-overlay'))
        pattern = CROS_VERSION_PATTERN % revision_to_check
        cmd = ['log', '--format=%ct', '-1', '--grep', pattern]

        git_revision = None

        log_output = CheckRunGit(cmd, cwd=cwd)
        if log_output:
          git_revision = log_output
          # %ct prints the committer timestamp; return it as an int.
          git_revision = int(log_output.strip())

        os.chdir(cwd)

        return git_revision
1078 def IsInProperBranch(self):
1079 """Confirms they're in the master branch for performing the bisection.
1080 This is needed or gclient will fail to sync properly.
1082 Returns:
1083 True if the current branch on src is 'master'
1085 cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
1086 log_output = CheckRunGit(cmd)
1087 log_output = log_output.strip()
1089 return log_output == "master"
1091 def SVNFindRev(self, revision, cwd=None):
1092 """Maps directly to the 'git svn find-rev' command.
1094 Args:
1095 revision: The git SHA1 to use.
1097 Returns:
1098 An integer changelist #, otherwise None.
1101 cmd = ['svn', 'find-rev', revision]
1103 output = CheckRunGit(cmd, cwd)
1104 svn_revision = output.strip()
1106 if IsStringInt(svn_revision):
1107 return int(svn_revision)
1109 return None
1111 def QueryRevisionInfo(self, revision, cwd=None):
1112 """Gathers information on a particular revision, such as author's name,
1113 email, subject, and date.
1115 Args:
1116 revision: Revision you want to gather information on.
1117 Returns:
1118 A dict in the following format:
1120 'author': %s,
1121 'email': %s,
1122 'date': %s,
1123 'subject': %s,
1124 'body': %s,
1127 commit_info = {}
1129 formats = ['%cN', '%cE', '%s', '%cD', '%b']
1130 targets = ['author', 'email', 'subject', 'date', 'body']
1132 for i in xrange(len(formats)):
1133 cmd = ['log', '--format=%s' % formats[i], '-1', revision]
1134 output = CheckRunGit(cmd, cwd=cwd)
1135 commit_info[targets[i]] = output.rstrip()
1137 return commit_info
1139 def CheckoutFileAtRevision(self, file_name, revision, cwd=None):
1140 """Performs a checkout on a file at the given revision.
1142 Returns:
1143 True if successful.
1145 return not RunGit(['checkout', revision, file_name], cwd=cwd)[1]
1147 def RevertFileToHead(self, file_name):
1148 """Unstages a file and returns it to HEAD.
1150 Returns:
1151 True if successful.
1153 # Reset doesn't seem to return 0 on success.
1154 RunGit(['reset', 'HEAD', file_name])
1156 return not RunGit(['checkout', bisect_utils.FILE_DEPS_GIT])[1]
1158 def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
1159 """Returns a list of commits that modified this file.
1161 Args:
1162 filename: Name of file.
1163 revision_start: Start of revision range.
1164 revision_end: End of revision range.
1166 Returns:
1167 Returns a list of commits that touched this file.
1169 cmd = ['log', '--format=%H', '%s~1..%s' % (revision_start, revision_end),
1170 filename]
1171 output = CheckRunGit(cmd)
1173 return [o for o in output.split('\n') if o]
1175 class BisectPerformanceMetrics(object):
1176 """BisectPerformanceMetrics performs a bisection against a list of range
1177 of revisions to narrow down where performance regressions may have
1178 occurred."""
  def __init__(self, source_control, opts):
    """Initializes the bisector.

    Args:
      source_control: An object providing source-control operations
          (sync, revision queries, etc.).
      opts: Parsed command-line options for the bisect run.
    """
    super(BisectPerformanceMetrics, self).__init__()

    self.opts = opts
    self.source_control = source_control
    # The script is expected to be launched from the chromium 'src' directory.
    self.src_cwd = os.getcwd()
    # The cros checkout is expected to live next to 'src'.
    self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
    # Maps depot name -> working directory for that depot.
    self.depot_cwd = {}
    # Queue of cleanup commands executed by PerformCleanup().
    self.cleanup_commands = []
    # Human-readable warnings accumulated during the bisect.
    self.warnings = []
    self.builder = Builder.FromOpts(opts)

    # This always starts true since the script grabs latest first.
    self.was_blink = True

    for d in DEPOT_NAMES:
      # The working directory of each depot is just the path to the depot, but
      # since we're already in 'src', we can skip that part.
      self.depot_cwd[d] = os.path.join(
          self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
1202 def PerformCleanup(self):
1203 """Performs cleanup when script is finished."""
1204 os.chdir(self.src_cwd)
1205 for c in self.cleanup_commands:
1206 if c[0] == 'mv':
1207 shutil.move(c[1], c[2])
1208 else:
1209 assert False, 'Invalid cleanup command.'
  def GetRevisionList(self, depot, bad_revision, good_revision):
    """Retrieves a list of all the commits between the bad revision and
    last known good revision.

    Args:
      depot: The depot to list revisions for.
      bad_revision: End of the revision range (known-bad).
      good_revision: Start of the revision range (known-good).

    Returns:
      A list of revisions, newest first. For cros these are integer commit
      timestamps (bisection proceeds by timestamp); otherwise whatever the
      source_control object returns (git hashes).
    """
    revision_work_list = []

    if depot == 'cros':
      # For cros, good/bad are integer commit timestamps, not hashes.
      revision_range_start = good_revision
      revision_range_end = bad_revision

      cwd = os.getcwd()
      self.ChangeToDepotWorkingDirectory('cros')

      # Print the commit timestamps for every commit in the revision time
      # range. We'll sort them and bisect by that. There is a remote chance that
      # 2 (or more) commits will share the exact same timestamp, but it's
      # probably safe to ignore that case.
      cmd = ['repo', 'forall', '-c',
          'git log --format=%%ct --before=%d --after=%d' % (
          revision_range_end, revision_range_start)]
      (output, return_code) = RunProcessAndRetrieveOutput(cmd)

      assert not return_code, 'An error occurred while running'\
                              ' "%s"' % ' '.join(cmd)

      os.chdir(cwd)

      # De-duplicate (set) since timestamps can collide across repos, then
      # sort newest-first.
      revision_work_list = list(set(
          [int(o) for o in output.split('\n') if IsStringInt(o)]))
      revision_work_list = sorted(revision_work_list, reverse=True)
    else:
      cwd = self._GetDepotDirectory(depot)
      revision_work_list = self.source_control.GetRevisionList(bad_revision,
          good_revision, cwd=cwd)

    return revision_work_list
  def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
    """Maps a v8 trunk revision to its bleeding_edge counterpart, if possible.

    Args:
      revision: A git hash on v8 trunk.

    Returns:
      The corresponding git hash in v8 bleeding_edge, otherwise None when the
      trunk commit cannot be confidently mapped.
    """
    svn_revision = self.source_control.SVNFindRev(revision)

    if IsStringInt(svn_revision):
      # V8 is tricky to bisect, in that there are only a few instances when
      # we can dive into bleeding_edge and get back a meaningful result.
      # Try to detect a V8 "business as usual" case, which is when:
      # 1. trunk revision N has description "Version X.Y.Z"
      # 2. bleeding_edge revision (N-1) has description "Prepare push to
      #    trunk. Now working on X.Y.(Z+1)."
      #
      # As of 01/24/2014, V8 trunk descriptions are formatted:
      # "Version 3.X.Y (based on bleeding_edge revision rZ)"
      # So we can just try parsing that out first and fall back to the old way.
      v8_dir = self._GetDepotDirectory('v8')
      v8_bleeding_edge_dir = self._GetDepotDirectory('v8_bleeding_edge')

      revision_info = self.source_control.QueryRevisionInfo(revision,
          cwd=v8_dir)

      version_re = re.compile("Version (?P<values>[0-9,.]+)")

      regex_results = version_re.search(revision_info['subject'])

      if regex_results:
        git_revision = None

        # Look for "based on bleeding_edge" and parse out revision
        if 'based on bleeding_edge' in revision_info['subject']:
          try:
            bleeding_edge_revision = revision_info['subject'].split(
                'bleeding_edge revision r')[1]
            bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
            git_revision = self.source_control.ResolveToRevision(
                bleeding_edge_revision, 'v8_bleeding_edge', 1,
                cwd=v8_bleeding_edge_dir)
            return git_revision
          except (IndexError, ValueError):
            # Subject didn't have the expected shape; fall through to the
            # older "Prepare push to trunk" heuristic below.
            pass

        if not git_revision:
          # Wasn't successful, try the old way of looking for "Prepare push to"
          git_revision = self.source_control.ResolveToRevision(
              int(svn_revision) - 1, 'v8_bleeding_edge', -1,
              cwd=v8_bleeding_edge_dir)

          if git_revision:
            revision_info = self.source_control.QueryRevisionInfo(git_revision,
                cwd=v8_bleeding_edge_dir)

            if 'Prepare push to trunk' in revision_info['subject']:
              return git_revision
    return None
1302 def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
1303 cwd = self._GetDepotDirectory('v8')
1304 cmd = ['log', '--format=%ct', '-1', revision]
1305 output = CheckRunGit(cmd, cwd=cwd)
1306 commit_time = int(output)
1307 commits = []
1309 if search_forward:
1310 cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
1311 'origin/master']
1312 output = CheckRunGit(cmd, cwd=cwd)
1313 output = output.split()
1314 commits = output
1315 commits = reversed(commits)
1316 else:
1317 cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
1318 'origin/master']
1319 output = CheckRunGit(cmd, cwd=cwd)
1320 output = output.split()
1321 commits = output
1323 bleeding_edge_revision = None
1325 for c in commits:
1326 bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)
1327 if bleeding_edge_revision:
1328 break
1330 return bleeding_edge_revision
1332 def Get3rdPartyRevisionsFromCurrentRevision(self, depot, revision):
1333 """Parses the DEPS file to determine WebKit/v8/etc... versions.
1335 Returns:
1336 A dict in the format {depot:revision} if successful, otherwise None.
1339 cwd = os.getcwd()
1340 self.ChangeToDepotWorkingDirectory(depot)
1342 results = {}
1344 if depot == 'chromium' or depot == 'android-chrome':
1345 locals = {'Var': lambda _: locals["vars"][_],
1346 'From': lambda *args: None}
1347 execfile(bisect_utils.FILE_DEPS_GIT, {}, locals)
1349 os.chdir(cwd)
1351 rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
1353 for d in DEPOT_NAMES:
1354 if DEPOT_DEPS_NAME[d].has_key('platform'):
1355 if DEPOT_DEPS_NAME[d]['platform'] != os.name:
1356 continue
1358 if (DEPOT_DEPS_NAME[d]['recurse'] and
1359 depot in DEPOT_DEPS_NAME[d]['from']):
1360 if (locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src']) or
1361 locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src_old'])):
1362 if locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src']):
1363 re_results = rxp.search(locals['deps'][DEPOT_DEPS_NAME[d]['src']])
1364 self.depot_cwd[d] = \
1365 os.path.join(self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
1366 elif locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src_old']):
1367 re_results = \
1368 rxp.search(locals['deps'][DEPOT_DEPS_NAME[d]['src_old']])
1369 self.depot_cwd[d] = \
1370 os.path.join(self.src_cwd, DEPOT_DEPS_NAME[d]['src_old'][4:])
1372 if re_results:
1373 results[d] = re_results.group('revision')
1374 else:
1375 print 'Couldn\'t parse revision for %s.' % d
1376 print
1377 return None
1378 else:
1379 print 'Couldn\'t find %s while parsing .DEPS.git.' % d
1380 print
1381 return None
1382 elif depot == 'cros':
1383 cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board,
1384 'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild',
1385 CROS_CHROMEOS_PATTERN]
1386 (output, return_code) = RunProcessAndRetrieveOutput(cmd)
1388 assert not return_code, 'An error occurred while running' \
1389 ' "%s"' % ' '.join(cmd)
1391 if len(output) > CROS_CHROMEOS_PATTERN:
1392 output = output[len(CROS_CHROMEOS_PATTERN):]
1394 if len(output) > 1:
1395 output = output.split('_')[0]
1397 if len(output) > 3:
1398 contents = output.split('.')
1400 version = contents[2]
1402 if contents[3] != '0':
1403 warningText = 'Chrome version: %s.%s but using %s.0 to bisect.' % \
1404 (version, contents[3], version)
1405 if not warningText in self.warnings:
1406 self.warnings.append(warningText)
1408 cwd = os.getcwd()
1409 self.ChangeToDepotWorkingDirectory('chromium')
1410 return_code = CheckRunGit(['log', '-1', '--format=%H',
1411 '--author=chrome-release@google.com', '--grep=to %s' % version,
1412 'origin/master'])
1413 os.chdir(cwd)
1415 results['chromium'] = output.strip()
1416 elif depot == 'v8':
1417 # We can't try to map the trunk revision to bleeding edge yet, because
1418 # we don't know which direction to try to search in. Have to wait until
1419 # the bisect has narrowed the results down to 2 v8 rolls.
1420 results['v8_bleeding_edge'] = None
1422 return results
1424 def BackupOrRestoreOutputdirectory(self, restore=False, build_type='Release'):
1425 """Backs up or restores build output directory based on restore argument.
1427 Args:
1428 restore: Indicates whether to restore or backup. Default is False(Backup)
1429 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1431 Returns:
1432 Path to backup or restored location as string. otherwise None if it fails.
1434 build_dir = os.path.abspath(
1435 self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1436 source_dir = os.path.join(build_dir, build_type)
1437 destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
1438 if restore:
1439 source_dir, destination_dir = destination_dir, source_dir
1440 if os.path.exists(source_dir):
1441 RmTreeAndMkDir(destination_dir, skip_makedir=True)
1442 shutil.move(source_dir, destination_dir)
1443 return destination_dir
1444 return None
1446 def DownloadCurrentBuild(self, revision, build_type='Release', patch=None):
1447 """Download the build archive for the given revision.
1449 Args:
1450 revision: The SVN revision to build.
1451 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1453 Returns:
1454 True if download succeeds, otherwise False.
1456 patch_sha = None
1457 if patch:
1458 # Get the SHA of the DEPS changes patch.
1459 patch_sha = GetSHA1HexDigest(patch)
1461 # Update the DEPS changes patch with a patch to create a new file named
1462 # 'DEPS.sha' and add patch_sha evaluated above to it.
1463 patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})
1465 # Source archive file path on cloud storage.
1466 source_file = GetRemoteBuildPath(revision, self.opts.target_arch, patch_sha)
1468 # Get Build output directory
1469 abs_build_dir = os.path.abspath(
1470 self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1471 # Downloaded archive file path.
1472 downloaded_file = os.path.join(
1473 abs_build_dir,
1474 GetZipFileName(revision, self.opts.target_arch, patch_sha))
1476 fetch_build_func = lambda: FetchFromCloudStorage(self.opts.gs_bucket,
1477 source_file,
1478 abs_build_dir)
1480 if not fetch_build_func():
1481 if not self.PostBuildRequestAndWait(revision,
1482 condition=fetch_build_func,
1483 patch=patch):
1484 raise RuntimeError('Somewthing went wrong while processing build'
1485 'request for: %s' % revision)
1486 # Generic name for the archive, created when archive file is extracted.
1487 output_dir = os.path.join(
1488 abs_build_dir, GetZipFileName(target_arch=self.opts.target_arch))
1489 # Unzip build archive directory.
1490 try:
1491 RmTreeAndMkDir(output_dir, skip_makedir=True)
1492 ExtractZip(downloaded_file, abs_build_dir)
1493 if os.path.exists(output_dir):
1494 self.BackupOrRestoreOutputdirectory(restore=False)
1495 # Build output directory based on target(e.g. out/Release, out/Debug).
1496 target_build_output_dir = os.path.join(abs_build_dir, build_type)
1497 print 'Moving build from %s to %s' % (
1498 output_dir, target_build_output_dir)
1499 shutil.move(output_dir, target_build_output_dir)
1500 return True
1501 raise IOError('Missing extracted folder %s ' % output_dir)
1502 except Exception as e:
1503 print 'Somewthing went wrong while extracting archive file: %s' % e
1504 self.BackupOrRestoreOutputdirectory(restore=True)
1505 # Cleanup any leftovers from unzipping.
1506 if os.path.exists(output_dir):
1507 RmTreeAndMkDir(output_dir, skip_makedir=True)
1508 finally:
1509 # Delete downloaded archive
1510 if os.path.exists(downloaded_file):
1511 os.remove(downloaded_file)
1512 return False
1514 def PostBuildRequestAndWait(self, revision, condition, patch=None):
1515 """POSTs the build request job to the tryserver instance."""
1517 def GetBuilderNameAndBuildTime(target_arch='ia32'):
1518 """Gets builder bot name and buildtime in seconds based on platform."""
1519 # Bot names should match the one listed in tryserver.chromium's
1520 # master.cfg which produces builds for bisect.
1521 if IsWindows():
1522 if Is64BitWindows() and target_arch == 'x64':
1523 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
1524 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
1525 if IsLinux():
1526 return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
1527 if IsMac():
1528 return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME)
1529 raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)
1530 if not condition:
1531 return False
1533 bot_name, build_timeout = GetBuilderNameAndBuildTime(self.opts.target_arch)
1535 # Creates a try job description.
1536 job_args = {'host': self.opts.builder_host,
1537 'port': self.opts.builder_port,
1538 'revision': 'src@%s' % revision,
1539 'bot': bot_name,
1540 'name': 'Bisect Job-%s' % revision
1542 # Update patch information if supplied.
1543 if patch:
1544 job_args['patch'] = patch
1545 # Posts job to build the revision on the server.
1546 if post_perf_builder_job.PostTryJob(job_args):
1547 poll_interval = 60
1548 start_time = time.time()
1549 while True:
1550 res = condition()
1551 if res:
1552 return res
1553 elapsed_time = time.time() - start_time
1554 if elapsed_time > build_timeout:
1555 raise RuntimeError('Timed out while waiting %ds for %s build.' %
1556 (build_timeout, revision))
1557 print ('Time elapsed: %ss, still waiting for %s build' %
1558 (elapsed_time, revision))
1559 time.sleep(poll_interval)
1560 return False
1562 def IsDownloadable(self, depot):
1563 """Checks if build is downloadable based on target platform and depot."""
1564 if self.opts.target_platform in ['chromium'] and self.opts.gs_bucket:
1565 return (depot == 'chromium' or
1566 'chromium' in DEPOT_DEPS_NAME[depot]['from'] or
1567 'v8' in DEPOT_DEPS_NAME[depot]['from'])
1568 return False
1570 def UpdateDeps(self, revision, depot, deps_file):
1571 """Updates DEPS file with new revision of dependency repository.
1573 This method search DEPS for a particular pattern in which depot revision
1574 is specified (e.g "webkit_revision": "123456"). If a match is found then
1575 it resolves the given git hash to SVN revision and replace it in DEPS file.
1577 Args:
1578 revision: A git hash revision of the dependency repository.
1579 depot: Current depot being bisected.
1580 deps_file: Path to DEPS file.
1582 Returns:
1583 True if DEPS file is modified successfully, otherwise False.
1585 if not os.path.exists(deps_file):
1586 return False
1588 deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
1589 # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
1590 if not deps_var:
1591 print 'DEPS update not supported for Depot: %s', depot
1592 return False
1594 # Hack to Angle repository because, in DEPS file "vars" dictionary variable
1595 # contains "angle_revision" key that holds git hash instead of SVN revision.
1596 # And sometime "angle_revision" key is not specified in "vars" variable,
1597 # in such cases check "deps" dictionary variable that matches
1598 # angle.git@[a-fA-F0-9]{40}$ and replace git hash.
1599 if depot == 'angle':
1600 return self.UpdateDEPSForAngle(revision, depot, deps_file)
1602 try:
1603 deps_contents = ReadStringFromFile(deps_file)
1604 # Check whether the depot and revision pattern in DEPS file vars
1605 # e.g. for webkit the format is "webkit_revision": "12345".
1606 deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_var,
1607 re.MULTILINE)
1608 match = re.search(deps_revision, deps_contents)
1609 if match:
1610 svn_revision = self.source_control.SVNFindRev(
1611 revision, self._GetDepotDirectory(depot))
1612 if not svn_revision:
1613 print 'Could not determine SVN revision for %s' % revision
1614 return False
1615 # Update the revision information for the given depot
1616 new_data = re.sub(deps_revision, str(svn_revision), deps_contents)
1618 # For v8_bleeding_edge revisions change V8 branch in order
1619 # to fetch bleeding edge revision.
1620 if depot == 'v8_bleeding_edge':
1621 new_data = self.UpdateV8Branch(new_data)
1622 if not new_data:
1623 return False
1624 # Write changes to DEPS file
1625 WriteStringToFile(new_data, deps_file)
1626 return True
1627 except IOError, e:
1628 print 'Something went wrong while updating DEPS file. [%s]' % e
1629 return False
1631 def UpdateV8Branch(self, deps_content):
1632 """Updates V8 branch in DEPS file to process v8_bleeding_edge.
1634 Check for "v8_branch" in DEPS file if exists update its value
1635 with v8_bleeding_edge branch. Note: "v8_branch" is added to DEPS
1636 variable from DEPS revision 254916, therefore check for "src/v8":
1637 <v8 source path> in DEPS in order to support prior DEPS revisions
1638 and update it.
1640 Args:
1641 deps_content: DEPS file contents to be modified.
1643 Returns:
1644 Modified DEPS file contents as a string.
1646 new_branch = r'branches/bleeding_edge'
1647 v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")')
1648 if re.search(v8_branch_pattern, deps_content):
1649 deps_content = re.sub(v8_branch_pattern, new_branch, deps_content)
1650 else:
1651 # Replaces the branch assigned to "src/v8" key in DEPS file.
1652 # Format of "src/v8" in DEPS:
1653 # "src/v8":
1654 # (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
1655 # So, "/trunk@" is replace with "/branches/bleeding_edge@"
1656 v8_src_pattern = re.compile(
1657 r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE)
1658 if re.search(v8_src_pattern, deps_content):
1659 deps_content = re.sub(v8_src_pattern, new_branch, deps_content)
1660 return deps_content
1662 def UpdateDEPSForAngle(self, revision, depot, deps_file):
1663 """Updates DEPS file with new revision for Angle repository.
1665 This is a hack for Angle depot case because, in DEPS file "vars" dictionary
1666 variable contains "angle_revision" key that holds git hash instead of
1667 SVN revision.
1669 And sometimes "angle_revision" key is not specified in "vars" variable,
1670 in such cases check "deps" dictionary variable that matches
1671 angle.git@[a-fA-F0-9]{40}$ and replace git hash.
1673 deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
1674 try:
1675 deps_contents = ReadStringFromFile(deps_file)
1676 # Check whether the depot and revision pattern in DEPS file vars variable
1677 # e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
1678 angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
1679 deps_var, re.MULTILINE)
1680 match = re.search(angle_rev_pattern % deps_var, deps_contents)
1681 if match:
1682 # Update the revision information for the given depot
1683 new_data = re.sub(angle_rev_pattern, revision, deps_contents)
1684 else:
1685 # Check whether the depot and revision pattern in DEPS file deps
1686 # variable. e.g.,
1687 # "src/third_party/angle": Var("chromium_git") +
1688 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
1689 angle_rev_pattern = re.compile(
1690 r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE)
1691 match = re.search(angle_rev_pattern, deps_contents)
1692 if not match:
1693 print 'Could not find angle revision information in DEPS file.'
1694 return False
1695 new_data = re.sub(angle_rev_pattern, revision, deps_contents)
1696 # Write changes to DEPS file
1697 WriteStringToFile(new_data, deps_file)
1698 return True
1699 except IOError, e:
1700 print 'Something went wrong while updating DEPS file, %s' % e
1701 return False
1703 def CreateDEPSPatch(self, depot, revision):
1704 """Modifies DEPS and returns diff as text.
1706 Args:
1707 depot: Current depot being bisected.
1708 revision: A git hash revision of the dependency repository.
1710 Returns:
1711 A tuple with git hash of chromium revision and DEPS patch text.
1713 deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
1714 if not os.path.exists(deps_file_path):
1715 raise RuntimeError('DEPS file does not exists.[%s]' % deps_file_path)
1716 # Get current chromium revision (git hash).
1717 chromium_sha = CheckRunGit(['rev-parse', 'HEAD']).strip()
1718 if not chromium_sha:
1719 raise RuntimeError('Failed to determine Chromium revision for %s' %
1720 revision)
1721 if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or
1722 'v8' in DEPOT_DEPS_NAME[depot]['from']):
1723 # Checkout DEPS file for the current chromium revision.
1724 if self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS,
1725 chromium_sha,
1726 cwd=self.src_cwd):
1727 if self.UpdateDeps(revision, depot, deps_file_path):
1728 diff_command = ['diff',
1729 '--src-prefix=src/',
1730 '--dst-prefix=src/',
1731 '--no-ext-diff',
1732 bisect_utils.FILE_DEPS]
1733 diff_text = CheckRunGit(diff_command, cwd=self.src_cwd)
1734 return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
1735 else:
1736 raise RuntimeError('Failed to update DEPS file for chromium: [%s]' %
1737 chromium_sha)
1738 else:
1739 raise RuntimeError('DEPS checkout Failed for chromium revision : [%s]' %
1740 chromium_sha)
1741 return (None, None)
  def BuildCurrentRevision(self, depot, revision=None):
    """Builds chrome and performance_ui_tests on the current revision.

    Args:
      depot: Current depot being bisected.
      revision: A git hash of the revision being built; only used to locate a
          prebuilt archive when --gs_bucket is in use.

    Returns:
      True if the build was successful.

    Raises:
      RuntimeError: A downloadable build was expected but the archive could
          not be fetched or its SVN revision could not be determined.
    """
    if self.opts.debug_ignore_build:
      return True
    cwd = os.getcwd()
    os.chdir(self.src_cwd)
    # Fetch build archive for the given revision from the cloud storage when
    # the storage bucket is passed.
    if self.IsDownloadable(depot) and revision:
      deps_patch = None
      if depot != 'chromium':
        # Create a DEPS patch with new revision for dependency repository.
        # Note: this rebinds |revision| to the chromium SHA the patch
        # applies to; it is used for both download and the revert below.
        (revision, deps_patch) = self.CreateDEPSPatch(depot, revision)
      # Get SVN revision for the given SHA, since builds are archived using SVN
      # revision.
      chromium_revision = self.source_control.SVNFindRev(revision)
      if not chromium_revision:
        raise RuntimeError(
            'Failed to determine SVN revision for %s' % revision)
      if self.DownloadCurrentBuild(chromium_revision, patch=deps_patch):
        os.chdir(cwd)
        if deps_patch:
          # Reverts the changes to DEPS file.
          self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS,
                                                     revision,
                                                     cwd=self.src_cwd)
        return True
      raise RuntimeError('Failed to download build archive for revision %s.\n'
                         'Unfortunately, bisection couldn\'t continue any '
                         'further. Please try running script without '
                         '--gs_bucket flag to produce local builds.' % revision)

    build_success = self.builder.Build(depot, self.opts)
    os.chdir(cwd)
    return build_success
1784 def RunGClientHooks(self):
1785 """Runs gclient with runhooks command.
1787 Returns:
1788 True if gclient reports no errors.
1791 if self.opts.debug_ignore_build:
1792 return True
1794 return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
1796 def TryParseHistogramValuesFromOutput(self, metric, text):
1797 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>.
1799 Args:
1800 metric: The metric as a list of [<trace>, <value>] strings.
1801 text: The text to parse the metric values from.
1803 Returns:
1804 A list of floating point numbers found.
1806 metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])
1808 text_lines = text.split('\n')
1809 values_list = []
1811 for current_line in text_lines:
1812 if metric_formatted in current_line:
1813 current_line = current_line[len(metric_formatted):]
1815 try:
1816 histogram_values = eval(current_line)
1818 for b in histogram_values['buckets']:
1819 average_for_bucket = float(b['high'] + b['low']) * 0.5
1820 # Extends the list with N-elements with the average for that bucket.
1821 values_list.extend([average_for_bucket] * b['count'])
1822 except:
1823 pass
1825 return values_list
1827 def TryParseResultValuesFromOutput(self, metric, text):
1828 """Attempts to parse a metric in the format RESULT <graph: <trace>.
1830 Args:
1831 metric: The metric as a list of [<trace>, <value>] strings.
1832 text: The text to parse the metric values from.
1834 Returns:
1835 A list of floating point numbers found.
1837 # Format is: RESULT <graph>: <trace>= <value> <units>
1838 metric_formatted = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))
1840 text_lines = text.split('\n')
1841 values_list = []
1843 for current_line in text_lines:
1844 # Parse the output from the performance test for the metric we're
1845 # interested in.
1846 metric_re = metric_formatted +\
1847 "(\s)*(?P<values>[0-9]+(\.[0-9]*)?)"
1848 metric_re = re.compile(metric_re)
1849 regex_results = metric_re.search(current_line)
1851 if not regex_results is None:
1852 values_list += [regex_results.group('values')]
1853 else:
1854 metric_re = metric_formatted +\
1855 "(\s)*\[(\s)*(?P<values>[0-9,.]+)\]"
1856 metric_re = re.compile(metric_re)
1857 regex_results = metric_re.search(current_line)
1859 if not regex_results is None:
1860 metric_values = regex_results.group('values')
1862 values_list += metric_values.split(',')
1864 values_list = [float(v) for v in values_list if IsStringFloat(v)]
1866 # If the metric is times/t, we need to sum the timings in order to get
1867 # similar regression results as the try-bots.
1868 metrics_to_sum = [['times', 't'], ['times', 'page_load_time'],
1869 ['cold_times', 'page_load_time'], ['warm_times', 'page_load_time']]
1871 if metric in metrics_to_sum:
1872 if values_list:
1873 values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]
1875 return values_list
1877 def ParseMetricValuesFromOutput(self, metric, text):
1878 """Parses output from performance_ui_tests and retrieves the results for
1879 a given metric.
1881 Args:
1882 metric: The metric as a list of [<trace>, <value>] strings.
1883 text: The text to parse the metric values from.
1885 Returns:
1886 A list of floating point numbers found.
1888 metric_values = self.TryParseResultValuesFromOutput(metric, text)
1890 if not metric_values:
1891 metric_values = self.TryParseHistogramValuesFromOutput(metric, text)
1893 return metric_values
1895 def _GenerateProfileIfNecessary(self, command_args):
1896 """Checks the command line of the performance test for dependencies on
1897 profile generation, and runs tools/perf/generate_profile as necessary.
1899 Args:
1900 command_args: Command line being passed to performance test, as a list.
1902 Returns:
1903 False if profile generation was necessary and failed, otherwise True.
1906 if '--profile-dir' in ' '.join(command_args):
1907 # If we were using python 2.7+, we could just use the argparse
1908 # module's parse_known_args to grab --profile-dir. Since some of the
1909 # bots still run 2.6, have to grab the arguments manually.
1910 arg_dict = {}
1911 args_to_parse = ['--profile-dir', '--browser']
1913 for arg_to_parse in args_to_parse:
1914 for i, current_arg in enumerate(command_args):
1915 if arg_to_parse in current_arg:
1916 current_arg_split = current_arg.split('=')
1918 # Check 2 cases, --arg=<val> and --arg <val>
1919 if len(current_arg_split) == 2:
1920 arg_dict[arg_to_parse] = current_arg_split[1]
1921 elif i + 1 < len(command_args):
1922 arg_dict[arg_to_parse] = command_args[i+1]
1924 path_to_generate = os.path.join('tools', 'perf', 'generate_profile')
1926 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'):
1927 profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
1928 return not RunProcess(['python', path_to_generate,
1929 '--profile-type-to-generate', profile_type,
1930 '--browser', arg_dict['--browser'], '--output-dir', profile_path])
1931 return False
1932 return True
def RunPerformanceTestAndParseResults(self, command_to_run, metric,
    reset_on_first_run=False, upload_on_last_run=False, results_label=None):
  """Runs a performance test on the current revision by executing the
  'command_to_run' and parses the results.

  Args:
    command_to_run: The command to be run to execute the performance test.
    metric: The metric to parse out from the results of the performance test.
    reset_on_first_run: If True, appends --reset-results to the telemetry
        command on the first repeat only.
    upload_on_last_run: If True, appends --upload-results to the telemetry
        command on the final repeat only.
    results_label: If set, forwarded to telemetry as --results-label.

  Returns:
    On success, it will return a tuple of the average value of the metric,
    and a success code of 0. On success/parse-failure the tuple also carries
    the combined test output as a third element; early failures (profile
    generation, missing binary) return a 2-tuple of (error message, -1).
  """
  # Debug mode short-circuits the (expensive) test run with canned values.
  if self.opts.debug_ignore_perf_test:
    return ({'mean': 0.0, 'std_err': 0.0, 'std_dev': 0.0, 'values': [0.0]}, 0)

  if IsWindows():
    command_to_run = command_to_run.replace('/', r'\\')

  args = shlex.split(command_to_run)

  if not self._GenerateProfileIfNecessary(args):
    return ('Failed to generate profile for performance test.', -1)

  # If running a telemetry test for cros, insert the remote ip, and
  # identity parameters.
  is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
  if self.opts.target_platform == 'cros' and is_telemetry:
    args.append('--remote=%s' % self.opts.cros_remote_ip)
    args.append('--identity=%s' % CROS_TEST_KEY_PATH)

  # Tests are run from the chromium src directory; restore cwd afterwards.
  cwd = os.getcwd()
  os.chdir(self.src_cwd)

  start_time = time.time()

  metric_values = []
  output_of_all_runs = ''
  for i in xrange(self.opts.repeat_test_count):
    # Can ignore the return code since if the tests fail, it won't return 0.
    try:
      current_args = copy.copy(args)
      if is_telemetry:
        if i == 0 and reset_on_first_run:
          current_args.append('--reset-results')
        elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
          current_args.append('--upload-results')
        if results_label:
          current_args.append('--results-label=%s' % results_label)
      (output, return_code) = RunProcessAndRetrieveOutput(current_args)
    except OSError, e:
      # ENOENT means the test binary itself wasn't found — most likely a
      # malformed command line, so give a targeted hint.
      if e.errno == errno.ENOENT:
        err_text  = ("Something went wrong running the performance test. "
            "Please review the command line:\n\n")
        if 'src/' in ' '.join(args):
          err_text += ("Check that you haven't accidentally specified a path "
              "with src/ in the command.\n\n")
        err_text += ' '.join(args)
        err_text += '\n'

        return (err_text, -1)
      raise

    output_of_all_runs += output
    if self.opts.output_buildbot_annotations:
      print output

    metric_values += self.ParseMetricValuesFromOutput(metric, output)

    elapsed_minutes = (time.time() - start_time) / 60.0

    # Stop repeating early if we've exceeded the time budget, or bail out
    # if the first run produced no parseable values.
    if elapsed_minutes >= self.opts.max_time_minutes or not metric_values:
      break

  os.chdir(cwd)

  # Need to get the average value if there were multiple values.
  if metric_values:
    truncated_mean = CalculateTruncatedMean(metric_values,
        self.opts.truncate_percent)
    standard_err = CalculateStandardError(metric_values)
    standard_dev = CalculateStandardDeviation(metric_values)

    values = {
      'mean': truncated_mean,
      'std_err': standard_err,
      'std_dev': standard_dev,
      'values': metric_values,
    }

    print 'Results of performance test: %12f %12f' % (
        truncated_mean, standard_err)
    print
    return (values, 0, output_of_all_runs)
  else:
    return ('Invalid metric specified, or no values returned from '
        'performance test.', -1, output_of_all_runs)
def FindAllRevisionsToSync(self, revision, depot):
  """Finds all dependant revisions and depots that need to be synced for a
  given revision. This is only useful in the git workflow, as an svn depot
  may be split into multiple mirrors.

  ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
  skia/include. To sync skia/src properly, one has to find the proper
  revisions in skia/gyp and skia/include.

  Args:
    revision: The revision to sync to.
    depot: The depot in use at the moment (probably skia).

  Returns:
    A list of [depot, revision] pairs that need to be synced.
  """
  sync_pairs = [[depot, revision]]

  # Some SVN depots were split into multiple git depots, so we need to
  # figure out for each mirror which git revision to grab. There's no
  # guarantee that the SVN revision will exist for each of the dependant
  # depots, so we have to grep the git logs and grab the next earlier one.
  if (depot not in ('chromium', 'cros', 'android-chrome') and
      DEPOT_DEPS_NAME[depot]['depends'] and
      self.source_control.IsGit()):
    svn_revision = self.source_control.SVNFindRev(revision)

    for dependant_depot in DEPOT_DEPS_NAME[depot]['depends']:
      self.ChangeToDepotWorkingDirectory(dependant_depot)

      resolved = self.source_control.ResolveToRevision(
          svn_revision, dependant_depot, -1000)

      if resolved:
        sync_pairs.append([dependant_depot, resolved])

    self.ChangeToDepotWorkingDirectory(depot)

    # All dependant depots must have resolved, otherwise the sync would be
    # inconsistent.
    if len(sync_pairs) - 1 != len(DEPOT_DEPS_NAME[depot]['depends']):
      return None

  return sync_pairs
2081 def PerformPreBuildCleanup(self):
2082 """Performs necessary cleanup between runs."""
2083 print 'Cleaning up between runs.'
2084 print
2086 # Having these pyc files around between runs can confuse the
2087 # perf tests and cause them to crash.
2088 for (path, _, files) in os.walk(self.src_cwd):
2089 for cur_file in files:
2090 if cur_file.endswith('.pyc'):
2091 path_to_file = os.path.join(path, cur_file)
2092 os.remove(path_to_file)
def PerformWebkitDirectoryCleanup(self, revision):
  """If the script is switching between Blink and WebKit during bisect,
  its faster to just delete the directory rather than leave it up to git
  to sync.

  Args:
    revision: Chromium revision whose .DEPS.git decides Blink vs. WebKit.

  Returns:
    True if successful.
  """
  # Temporarily check out .DEPS.git at |revision| so we can inspect which
  # WebKit variant that revision expects.
  if not self.source_control.CheckoutFileAtRevision(
      bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
    return False

  cwd = os.getcwd()
  os.chdir(self.src_cwd)

  is_blink = bisect_utils.IsDepsFileBlink()

  os.chdir(cwd)

  # Restore the working tree before doing anything else.
  if not self.source_control.RevertFileToHead(
      bisect_utils.FILE_DEPS_GIT):
    return False

  # Only wipe third_party/WebKit when actually switching variants; track
  # the current variant so the next call can compare against it.
  if self.was_blink != is_blink:
    self.was_blink = is_blink
    return bisect_utils.RemoveThirdPartyWebkitDirectory()
  return True
def PerformCrosChrootCleanup(self):
  """Deletes the chroot.

  Returns:
    True if successful.
  """
  original_cwd = os.getcwd()
  self.ChangeToDepotWorkingDirectory('cros')
  # cros_sdk must run from the cros depot directory.
  exit_code = RunProcess([CROS_SDK_PATH, '--delete'])
  os.chdir(original_cwd)
  return not exit_code
def CreateCrosChroot(self):
  """Creates a new chroot.

  Returns:
    True if successful.
  """
  original_cwd = os.getcwd()
  self.ChangeToDepotWorkingDirectory('cros')
  # cros_sdk must run from the cros depot directory.
  exit_code = RunProcess([CROS_SDK_PATH, '--create'])
  os.chdir(original_cwd)
  return not exit_code
def PerformPreSyncCleanup(self, revision, depot):
  """Performs any necessary cleanup before syncing.

  Returns:
    True if successful.
  """
  if depot == 'cros':
    return self.PerformCrosChrootCleanup()
  if depot == 'chromium':
    # Remove the stale libjingle checkout before the WebKit cleanup.
    if not bisect_utils.RemoveThirdPartyLibjingleDirectory():
      return False
    return self.PerformWebkitDirectoryCleanup(revision)
  return True
def RunPostSync(self, depot):
  """Performs any work after syncing.

  Args:
    depot: The depot that was just synced.

  Returns:
    True if successful.
  """
  # Android builds need their environment (re)configured after every sync.
  if self.opts.target_platform == 'android':
    if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
        path_to_src=self.src_cwd):
      return False

  if depot == 'cros':
    return self.CreateCrosChroot()
  else:
    return self.RunGClientHooks()
  # NOTE(review): this return appears unreachable — both branches above
  # return. Left as-is; confirm before removing.
  return True
def ShouldSkipRevision(self, depot, revision):
  """Some commits can be safely skipped (such as a DEPS roll), since the tool
  is git based those changes would have no effect.

  Args:
    depot: The depot being bisected.
    revision: Current revision we're synced to.

  Returns:
    True if we should skip building/testing this revision.
  """
  if depot != 'chromium' or not self.source_control.IsGit():
    return False

  # List the files touched by |revision|; a commit touching only DEPS is a
  # roll and has no effect in a git-based bisect.
  touched_files = CheckRunGit(
      ['diff-tree', '--no-commit-id', '--name-only', '-r', revision])
  return touched_files.splitlines() == ['DEPS']
def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
    skippable=False):
  """Performs a full sync/build/run of the specified revision.

  Args:
    revision: The revision to sync to.
    depot: The depot that's being used at the moment (src, webkit, etc.)
    command_to_run: The command to execute the performance test.
    metric: The performance metric being tested.
    skippable: If True, DEPS-roll-only revisions may be skipped entirely.

  Returns:
    On success, a tuple containing the results of the performance test.
    Otherwise, a tuple with the error message.
  """
  # Pick the sync tool that matches the depot; other depots sync via
  # plain git below.
  sync_client = None
  if depot == 'chromium' or depot == 'android-chrome':
    sync_client = 'gclient'
  elif depot == 'cros':
    sync_client = 'repo'

  revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)

  if not revisions_to_sync:
    return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)

  if not self.PerformPreSyncCleanup(revision, depot):
    return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)

  success = True

  if not self.opts.debug_ignore_sync:
    for r in revisions_to_sync:
      self.ChangeToDepotWorkingDirectory(r[0])

      if sync_client:
        self.PerformPreBuildCleanup()

      # If you're using gclient to sync, you need to specify the depot you
      # want so that all the dependencies sync properly as well.
      # ie. gclient sync src@<SHA1>
      current_revision = r[1]
      if sync_client == 'gclient':
        current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
            current_revision)
      if not self.source_control.SyncToRevision(current_revision,
          sync_client):
        success = False

        break

  if success:
    success = self.RunPostSync(depot)
    if success:
      if skippable and self.ShouldSkipRevision(depot, revision):
        return ('Skipped revision: [%s]' % str(revision),
            BUILD_RESULT_SKIPPED)

      # Track build vs. test time separately so both can be reported.
      start_build_time = time.time()
      if self.BuildCurrentRevision(depot, revision):
        after_build_time = time.time()
        results = self.RunPerformanceTestAndParseResults(command_to_run,
            metric)
        # Restore build output directory once the tests are done, to avoid
        # any discrepancy.
        if self.IsDownloadable(depot) and revision:
          self.BackupOrRestoreOutputdirectory(restore=True)

        if results[1] == 0:
          external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision(
              depot, revision)

          if not external_revisions is None:
            # Success tuple: (values, 0, external revs, perf time, build
            # time).
            return (results[0], results[1], external_revisions,
                time.time() - after_build_time, after_build_time -
                start_build_time)
          else:
            return ('Failed to parse DEPS file for external revisions.',
                BUILD_RESULT_FAIL)
        else:
          return results
      else:
        return ('Failed to build revision: [%s]' % (str(revision, )),
            BUILD_RESULT_FAIL)
    else:
      return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
  else:
    return ('Failed to sync revision: [%s]' % (str(revision, )),
        BUILD_RESULT_FAIL)
def CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
  """Given known good and bad values, decide if the current_value passed
  or failed.

  Args:
    current_value: The value of the metric being checked.
    known_bad_value: The reference value for a "failed" run.
    known_good_value: The reference value for a "passed" run.

  Returns:
    True if the current_value is closer to the known_good_value than the
    known_bad_value.
  """
  mean = current_value['mean']
  # Classify by whichever reference mean is strictly nearer; ties count as
  # failures.
  return abs(mean - known_good_value['mean']) < abs(
      mean - known_bad_value['mean'])
def _GetDepotDirectory(self, depot_name):
  """Maps a depot name to its local working directory."""
  if depot_name == 'chromium':
    return self.src_cwd
  if depot_name == 'cros':
    return self.cros_cwd
  if depot_name in DEPOT_NAMES:
    return self.depot_cwd[depot_name]
  assert False, ('Unknown depot [ %s ] encountered. Possibly a new one'
      ' was added without proper support?' % depot_name)
def ChangeToDepotWorkingDirectory(self, depot_name):
  """Given a depot, changes to the appropriate working directory.

  Args:
    depot_name: The name of the depot (see DEPOT_NAMES).
  """
  target_directory = self._GetDepotDirectory(depot_name)
  os.chdir(target_directory)
def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
  """Fills in v8 bleeding_edge revisions for both ends of the bisect range."""
  # Search forward from the min end and backward from the max end so the
  # mapped range still brackets the regression.
  nearest_min = self._GetNearestV8BleedingEdgeFromTrunk(
      min_revision_data['revision'], search_forward=True)
  nearest_max = self._GetNearestV8BleedingEdgeFromTrunk(
      max_revision_data['revision'], search_forward=False)
  min_revision_data['external']['v8_bleeding_edge'] = nearest_min
  max_revision_data['external']['v8_bleeding_edge'] = nearest_max

  min_mappable = self._GetV8BleedingEdgeFromV8TrunkIfMappable(
      min_revision_data['revision'])
  if not min_mappable or not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
      max_revision_data['revision']):
    self.warnings.append('Trunk revisions in V8 did not map directly to '
        'bleeding_edge. Attempted to expand the range to find V8 rolls which '
        'did map directly to bleeding_edge revisions, but results might not '
        'be valid.')
def _FindNextDepotToBisect(self, current_depot, current_revision,
    min_revision_data, max_revision_data):
  """Given the state of the bisect, decides which depot the script should
  dive into next (if any).

  Args:
    current_depot: Current depot being bisected.
    current_revision: Current revision synced to.
    min_revision_data: Data about the earliest revision in the bisect range.
    max_revision_data: Data about the latest revision in the bisect range.

  Returns:
    The depot to bisect next, or None.
  """
  for candidate in DEPOT_NAMES:
    depot_info = DEPOT_DEPS_NAME[candidate]
    # Skip depots that only apply to a different platform.
    if 'platform' in depot_info and depot_info['platform'] != os.name:
      continue

    # Only consider depots that are marked recursable from the current one.
    if not (depot_info['recurse'] and
        min_revision_data['depot'] in depot_info['from']):
      continue

    if current_depot == 'v8':
      # We grab the bleeding_edge info here rather than earlier because we
      # finally have the revision range. From that we can search forwards and
      # backwards to try to match trunk revisions to bleeding_edge.
      self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)

    min_external = min_revision_data['external'][candidate]
    max_external = max_revision_data['external'][candidate]

    # No change in this external depot across the range — nothing to bisect.
    if min_external == max_external:
      continue

    if min_external and max_external:
      return candidate

  return None
def PrepareToBisectOnDepot(self,
                           current_depot,
                           end_revision,
                           start_revision,
                           previous_depot,
                           previous_revision):
  """Changes to the appropriate directory and gathers a list of revisions
  to bisect between |start_revision| and |end_revision|.

  Args:
    current_depot: The depot we want to bisect.
    end_revision: End of the revision range.
    start_revision: Start of the revision range.
    previous_depot: The depot we were previously bisecting.
    previous_revision: The last revision we synced to on |previous_depot|.

  Returns:
    A list containing the revisions between |start_revision| and
    |end_revision| inclusive.
  """
  # Change into working directory of external library to run
  # subsequent commands.
  self.ChangeToDepotWorkingDirectory(current_depot)

  # V8 (and possibly others) is merged in periodically. Bisecting
  # this directory directly won't give much good info.
  if DEPOT_DEPS_NAME[current_depot].has_key('custom_deps'):
    config_path = os.path.join(self.src_cwd, '..')
    if bisect_utils.RunGClientAndCreateConfig(self.opts,
        DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
      return []
    if bisect_utils.RunGClient(
        ['sync', '--revision', previous_revision], cwd=self.src_cwd):
      return []

  if current_depot == 'v8_bleeding_edge':
    self.ChangeToDepotWorkingDirectory('chromium')

    # Swap bleeding_edge into the v8 directory so builds pick it up, and
    # record shell commands that will undo the swap during cleanup.
    shutil.move('v8', 'v8.bak')
    shutil.move('v8_bleeding_edge', 'v8')

    self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
    self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])

    # Re-point the depot paths at the swapped directories.
    self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
    self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')

    self.ChangeToDepotWorkingDirectory(current_depot)

  depot_revision_list = self.GetRevisionList(current_depot,
                                             end_revision,
                                             start_revision)

  self.ChangeToDepotWorkingDirectory('chromium')

  return depot_revision_list
def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
  """Gathers reference values by running the performance tests on the
  known good and bad revisions.

  Args:
    good_rev: The last known good revision where the performance regression
      has not occurred yet.
    bad_rev: A revision where the performance regression has already occurred.
    cmd: The command to execute the performance test.
    metric: The metric being tested for regression.

  Returns:
    A tuple with the results of building and running each revision.
  """
  # Run the bad revision first; if it fails there's no point building the
  # good one.
  bad_run_results = self.SyncBuildAndRunRevision(
      bad_rev, target_depot, cmd, metric)

  good_run_results = None
  if not bad_run_results[1]:
    good_run_results = self.SyncBuildAndRunRevision(
        good_rev, target_depot, cmd, metric)

  return (bad_run_results, good_run_results)
def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
  """Adds new revisions to the revision_data dict and initializes them.

  Args:
    revisions: List of revisions to add.
    depot: Depot that's currently in use (src, webkit, etc...)
    sort: Sorting key for displaying revisions.
    revision_data: A dict to add the new revisions into. Existing revisions
        will have their sort keys offset.
  """
  offset = len(revisions)

  # Shift every existing entry that sorts after the insertion point.
  for entry in revision_data.values():
    if entry['sort'] > sort:
      entry['sort'] += offset

  for index, rev in enumerate(revisions):
    revision_data[rev] = {
        'revision' : rev,
        'depot' : depot,
        'value' : None,
        'perf_time' : 0,
        'build_time' : 0,
        'passed' : '?',
        'sort' : index + sort + 1,
    }
2500 def PrintRevisionsToBisectMessage(self, revision_list, depot):
2501 if self.opts.output_buildbot_annotations:
2502 step_name = 'Bisection Range: [%s - %s]' % (
2503 revision_list[len(revision_list)-1], revision_list[0])
2504 bisect_utils.OutputAnnotationStepStart(step_name)
2506 print
2507 print 'Revisions to bisect on [%s]:' % depot
2508 for revision_id in revision_list:
2509 print ' -> %s' % (revision_id, )
2510 print
2512 if self.opts.output_buildbot_annotations:
2513 bisect_utils.OutputAnnotationStepClosed()
def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
  """Checks to see if changes to DEPS file occurred, and that the revision
  range also includes the change to .DEPS.git. If it doesn't, attempts to
  expand the revision range to include it.

  Args:
    bad_revision: First known bad revision.
    good_revision: Last known good revision.

  Returns:
    A tuple with the new bad and good revisions.
  """
  if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
    changes_to_deps = self.source_control.QueryFileRevisionHistory(
        'DEPS', good_revision, bad_revision)

    if changes_to_deps:
      # DEPS file was changed, search from the oldest change to DEPS file to
      # bad_revision to see if there are matching .DEPS.git changes.
      oldest_deps_change = changes_to_deps[-1]
      changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
          bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)

      # A mismatch means the trailing .DEPS.git commit landed after
      # |bad_revision|; look for it just past the newest DEPS change.
      if len(changes_to_deps) != len(changes_to_gitdeps):
        # Grab the timestamp of the last DEPS change
        cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
        output = CheckRunGit(cmd)
        commit_time = int(output)

        # Try looking for a commit that touches the .DEPS.git file in the
        # next 15 minutes after the DEPS file change.
        cmd = ['log', '--format=%H', '-1',
            '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
            'origin/master', bisect_utils.FILE_DEPS_GIT]
        output = CheckRunGit(cmd)
        output = output.strip()
        if output:
          self.warnings.append('Detected change to DEPS and modified '
              'revision range to include change to .DEPS.git')
          return (output, good_revision)
        else:
          self.warnings.append('Detected change to DEPS but couldn\'t find '
              'matching change to .DEPS.git')
  return (bad_revision, good_revision)
def CheckIfRevisionsInProperOrder(self,
                                  target_depot,
                                  good_revision,
                                  bad_revision):
  """Checks that |good_revision| is an earlier revision than |bad_revision|.

  Args:
    good_revision: Number/tag of the known good revision.
    bad_revision: Number/tag of the known bad revision.

  Returns:
    True if the revisions are in the proper order (good earlier than bad).
  """
  if not self.source_control.IsGit() or target_depot == 'cros':
    # Cros/svn use integers
    return int(good_revision) <= int(bad_revision)

  cwd = self._GetDepotDirectory(target_depot)

  def _CommitTimestamp(revision):
    # Commit time (seconds since epoch) of |revision| in |cwd|.
    return int(CheckRunGit(['log', '--format=%ct', '-1', revision], cwd=cwd))

  return _CommitTimestamp(good_revision) <= _CommitTimestamp(bad_revision)
def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
  """Given known good and bad revisions, run a binary search on all
  intermediate revisions to determine the CL where the performance regression
  occurred.

  Args:
    command_to_run: Specify the command to execute the performance test.
    good_revision: Number/tag of the known good revision.
    bad_revision: Number/tag of the known bad revision.
    metric: The performance metric to monitor.

  Returns:
    A dict with 2 members, 'revision_data' and 'error'. On success,
    'revision_data' will contain a dict mapping revision ids to
    data about that revision. Each piece of revision data consists of a
    dict with the following keys:

    'passed': Represents whether the performance test was successful at
        that revision. Possible values include: 1 (passed), 0 (failed),
        '?' (skipped), 'F' (build failed).
    'depot': The depot that this revision is from (ie. WebKit)
    'external': If the revision is a 'src' revision, 'external' contains
        the revisions of each of the external libraries.
    'sort': A sort value for sorting the dict in order of commits.

    For example:
    {
      'error': None,
      'revision_data':
      {
        'CL #1':
        {
          'passed': False,
          'depot': 'chromium',
          'external': None,
          'sort': 0
        }
      }
    }

    If an error occurred, the 'error' field will contain the message and
    'revision_data' will be empty.
  """
  results = {'revision_data' : {},
             'error' : None}

  # Choose depot to bisect first
  target_depot = 'chromium'
  if self.opts.target_platform == 'cros':
    target_depot = 'cros'
  elif self.opts.target_platform == 'android-chrome':
    target_depot = 'android-chrome'

  cwd = os.getcwd()
  self.ChangeToDepotWorkingDirectory(target_depot)

  # If they passed SVN CL's, etc... we can try match them to git SHA1's.
  bad_revision = self.source_control.ResolveToRevision(bad_revision_in,
                                                       target_depot, 100)
  good_revision = self.source_control.ResolveToRevision(good_revision_in,
                                                        target_depot, -100)

  os.chdir(cwd)

  if bad_revision is None:
    results['error'] = 'Could\'t resolve [%s] to SHA1.' % (bad_revision_in,)
    return results

  if good_revision is None:
    results['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in,)
    return results

  # Check that they didn't accidentally swap good and bad revisions.
  if not self.CheckIfRevisionsInProperOrder(
      target_depot, good_revision, bad_revision):
    results['error'] = 'bad_revision < good_revision, did you swap these '\
        'by mistake?'
    return results

  (bad_revision, good_revision) = self.NudgeRevisionsIfDEPSChange(
      bad_revision, good_revision)

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepStart('Gathering Revisions')

  print 'Gathering revision range for bisection.'
  # Retrieve a list of revisions to do bisection on.
  src_revision_list = self.GetRevisionList(target_depot,
                                           bad_revision,
                                           good_revision)

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepClosed()

  if src_revision_list:
    # revision_data will store information about a revision such as the
    # depot it came from, the webkit/V8 revision at that time,
    # performance timing, build state, etc...
    revision_data = results['revision_data']

    # revision_list is the list we're binary searching through at the moment.
    revision_list = []

    sort_key_ids = 0

    for current_revision_id in src_revision_list:
      sort_key_ids += 1

      revision_data[current_revision_id] = {'value' : None,
                                            'passed' : '?',
                                            'depot' : target_depot,
                                            'external' : None,
                                            'perf_time' : 0,
                                            'build_time' : 0,
                                            'sort' : sort_key_ids}
      revision_list.append(current_revision_id)

    min_revision = 0
    max_revision = len(revision_list) - 1

    self.PrintRevisionsToBisectMessage(revision_list, target_depot)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')

    print 'Gathering reference values for bisection.'

    # Perform the performance tests on the good and bad revisions, to get
    # reference values.
    (bad_results, good_results) = self.GatherReferenceValues(good_revision,
                                                             bad_revision,
                                                             command_to_run,
                                                             metric,
                                                             target_depot)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()

    if bad_results[1]:
      results['error'] = ('An error occurred while building and running '
          'the \'bad\' reference value. The bisect cannot continue without '
          'a working \'bad\' revision to start from.\n\nError: %s' %
          bad_results[0])
      return results

    if good_results[1]:
      results['error'] = ('An error occurred while building and running '
          'the \'good\' reference value. The bisect cannot continue without '
          'a working \'good\' revision to start from.\n\nError: %s' %
          good_results[0])
      return results

    # We need these reference values to determine if later runs should be
    # classified as pass or fail.
    known_bad_value = bad_results[0]
    known_good_value = good_results[0]

    # Can just mark the good and bad revisions explicitly here since we
    # already know the results.
    bad_revision_data = revision_data[revision_list[0]]
    bad_revision_data['external'] = bad_results[2]
    bad_revision_data['perf_time'] = bad_results[3]
    bad_revision_data['build_time'] = bad_results[4]
    bad_revision_data['passed'] = False
    bad_revision_data['value'] = known_bad_value

    good_revision_data = revision_data[revision_list[max_revision]]
    good_revision_data['external'] = good_results[2]
    good_revision_data['perf_time'] = good_results[3]
    good_revision_data['build_time'] = good_results[4]
    good_revision_data['passed'] = True
    good_revision_data['value'] = known_good_value

    next_revision_depot = target_depot

    # Main binary-search loop: narrows [min_revision, max_revision] until
    # adjacent, then optionally descends into an external depot.
    while True:
      if not revision_list:
        break

      min_revision_data = revision_data[revision_list[min_revision]]
      max_revision_data = revision_data[revision_list[max_revision]]

      if max_revision - min_revision <= 1:
        current_depot = min_revision_data['depot']
        if min_revision_data['passed'] == '?':
          next_revision_index = min_revision
        elif max_revision_data['passed'] == '?':
          next_revision_index = max_revision
        elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
          previous_revision = revision_list[min_revision]
          # If there were changes to any of the external libraries we track,
          # should bisect the changes there as well.
          external_depot = self._FindNextDepotToBisect(current_depot,
              previous_revision, min_revision_data, max_revision_data)

          # If there was no change in any of the external depots, the search
          # is over.
          if not external_depot:
            if current_depot == 'v8':
              self.warnings.append('Unfortunately, V8 bisection couldn\'t '
                  'continue any further. The script can only bisect into '
                  'V8\'s bleeding_edge repository if both the current and '
                  'previous revisions in trunk map directly to revisions in '
                  'bleeding_edge.')
            break

          earliest_revision = max_revision_data['external'][external_depot]
          latest_revision = min_revision_data['external'][external_depot]

          new_revision_list = self.PrepareToBisectOnDepot(external_depot,
                                                          latest_revision,
                                                          earliest_revision,
                                                          next_revision_depot,
                                                          previous_revision)

          if not new_revision_list:
            results['error'] = 'An error occurred attempting to retrieve'\
                               ' revision range: [%s..%s]' % \
                               (earliest_revision, latest_revision)
            return results

          self.AddRevisionsIntoRevisionData(new_revision_list,
                                            external_depot,
                                            min_revision_data['sort'],
                                            revision_data)

          # Reset the bisection and perform it on the newly inserted
          # changelists.
          revision_list = new_revision_list
          min_revision = 0
          max_revision = len(revision_list) - 1
          sort_key_ids += len(revision_list)

          print 'Regression in metric:%s appears to be the result of changes'\
                ' in [%s].' % (metric, external_depot)

          self.PrintRevisionsToBisectMessage(revision_list, external_depot)

          continue
        else:
          break
      else:
        # Standard bisection step: probe the midpoint of the range.
        next_revision_index = int((max_revision - min_revision) / 2) +\
                              min_revision

      next_revision_id = revision_list[next_revision_index]
      next_revision_data = revision_data[next_revision_id]
      next_revision_depot = next_revision_data['depot']

      self.ChangeToDepotWorkingDirectory(next_revision_depot)

      if self.opts.output_buildbot_annotations:
        step_name = 'Working on [%s]' % next_revision_id
        bisect_utils.OutputAnnotationStepStart(step_name)

      print 'Working on revision: [%s]' % next_revision_id

      run_results = self.SyncBuildAndRunRevision(next_revision_id,
                                                 next_revision_depot,
                                                 command_to_run,
                                                 metric, skippable=True)

      # If the build is successful, check whether or not the metric
      # had regressed.
      if not run_results[1]:
        if len(run_results) > 2:
          next_revision_data['external'] = run_results[2]
          next_revision_data['perf_time'] = run_results[3]
          next_revision_data['build_time'] = run_results[4]

        passed_regression = self.CheckIfRunPassed(run_results[0],
                                                  known_good_value,
                                                  known_bad_value)

        next_revision_data['passed'] = passed_regression
        next_revision_data['value'] = run_results[0]

        if passed_regression:
          max_revision = next_revision_index
        else:
          min_revision = next_revision_index
      else:
        if run_results[1] == BUILD_RESULT_SKIPPED:
          next_revision_data['passed'] = 'Skipped'
        elif run_results[1] == BUILD_RESULT_FAIL:
          next_revision_data['passed'] = 'Build Failed'

        print run_results[0]

        # If the build is broken, remove it and redo search.
        revision_list.pop(next_revision_index)

        max_revision -= 1

      if self.opts.output_buildbot_annotations:
        self._PrintPartialResults(results)
        bisect_utils.OutputAnnotationStepClosed()
  else:
    # Weren't able to sync and retrieve the revision range.
    results['error'] = 'An error occurred attempting to retrieve revision '\
                       'range: [%s..%s]' % (good_revision, bad_revision)

  return results
def _PrintPartialResults(self, results_dict):
  """Prints a table of the revisions tested so far."""
  revision_data = results_dict['revision_data']
  by_sort_key = sorted(revision_data.items(),
                       key=lambda item: item[1]['sort'])
  summary = self._GetResultsDict(revision_data, by_sort_key)

  self._PrintTestedCommitsTable(by_sort_key,
                                summary['first_working_revision'],
                                summary['last_broken_revision'],
                                100, final_step=False)
2906 def _PrintConfidence(self, results_dict):
2907 # The perf dashboard specifically looks for the string
2908 # "Confidence in Bisection Results: 100%" to decide whether or not
2909 # to cc the author(s). If you change this, please update the perf
2910 # dashboard as well.
2911 print 'Confidence in Bisection Results: %d%%' % results_dict['confidence']
2913 def _PrintBanner(self, results_dict):
2914 print
2915 print " __o_\___ Aw Snap! We hit a speed bump!"
2916 print "=-O----O-'__.~.___________________________________"
2917 print
2918 print 'Bisect reproduced a %.02f%% (+-%.02f%%) change in the %s metric.' % (
2919 results_dict['regression_size'], results_dict['regression_std_err'],
2920 '/'.join(self.opts.metric))
2921 self._PrintConfidence(results_dict)
2923 def _PrintFailedBanner(self, results_dict):
2924 print
2925 print ('Bisect could not reproduce a change in the '
2926 '%s/%s metric.' % (self.opts.metric[0], self.opts.metric[1]))
2927 print
2928 self._PrintConfidence(results_dict)
2930 def _GetViewVCLinkFromDepotAndHash(self, cl, depot):
2931 info = self.source_control.QueryRevisionInfo(cl,
2932 self._GetDepotDirectory(depot))
2933 if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'):
2934 try:
2935 # Format is "git-svn-id: svn://....@123456 <other data>"
2936 svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i]
2937 svn_revision = svn_line[0].split('@')
2938 svn_revision = svn_revision[1].split(' ')[0]
2939 return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision
2940 except IndexError:
2941 return ''
2942 return ''
  def _PrintRevisionInfo(self, cl, info, depot=None):
    """Prints details (subject, author, link, date) for one culprit commit.

    Args:
      cl: The commit hash.
      info: Dict of commit info as returned by QueryRevisionInfo
          (contains 'subject', 'author', 'email', 'body', 'date').
      depot: Optional depot name, used to build a ViewVC link.
    """
    # The perf dashboard specifically looks for the string
    # "Author : " to parse out who to cc on a bug. If you change the
    # formatting here, please update the perf dashboard as well.
    print
    print 'Subject : %s' % info['subject']
    print 'Author : %s' % info['author']
    # Only print the email separately when it doesn't just repeat the author.
    if not info['email'].startswith(info['author']):
      print 'Email : %s' % info['email']
    commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
    if commit_link:
      print 'Link : %s' % commit_link
    else:
      # Couldn't map the commit to a viewable URL; dump the raw body so the
      # reader can find the revision themselves.
      print
      print 'Failed to parse svn revision from body:'
      print
      print info['body']
      print
    print 'Commit : %s' % cl
    print 'Date : %s' % info['date']
  def _PrintTestedCommitsTable(self, revision_data_sorted,
      first_working_revision, last_broken_revision, confidence,
      final_step=True):
    """Prints a table of every tested commit with its mean/std-err and state.

    Args:
      revision_data_sorted: (revision, data) pairs in bisect sort order.
      first_working_revision: First revision that passed (good side).
      last_broken_revision: Last revision that failed (bad side).
      confidence: Confidence score; when falsy, the State column is left
          blank and no emphasis spacing is printed.
      final_step: True for the final results table ("Tested commits:"),
          False for partial output ("Partial results:").
    """
    print
    if final_step:
      print 'Tested commits:'
    else:
      print 'Partial results:'
    print ' %20s %70s %12s %14s %13s' % ('Depot'.center(20, ' '),
        'Commit SHA'.center(70, ' '), 'Mean'.center(12, ' '),
        'Std. Error'.center(14, ' '), 'State'.center(13, ' '))
    # state tracks position relative to the suspected range:
    # 0 = before (bad), 1 = between broken and working (suspected), 2 = good.
    state = 0
    for current_id, current_data in revision_data_sorted:
      if current_data['value']:
        if (current_id == last_broken_revision or
            current_id == first_working_revision):
          # If confidence is too low, don't add this empty line since it's
          # used to put focus on a suspected CL.
          if confidence and final_step:
            print
          state += 1
          if state == 2 and not final_step:
            # Just want a separation between "bad" and "good" cl's.
            print
        state_str = 'Bad'
        if state == 1 and final_step:
          state_str = 'Suspected CL'
        elif state == 2:
          state_str = 'Good'
        # If confidence is too low, don't bother outputting good/bad.
        if not confidence:
          state_str = ''
        state_str = state_str.center(13, ' ')
        std_error = ('+-%.02f' %
            current_data['value']['std_err']).center(14, ' ')
        mean = ('%.02f' % current_data['value']['mean']).center(12, ' ')
        cl_link = self._GetViewVCLinkFromDepotAndHash(current_id,
            current_data['depot'])
        if not cl_link:
          # Fall back to the raw hash when no viewable URL exists.
          cl_link = current_id
        print ' %20s %70s %12s %14s %13s' % (
            current_data['depot'].center(20, ' '), cl_link.center(70, ' '),
            mean, std_error, state_str)
3012 def _PrintReproSteps(self):
3013 print
3014 print 'To reproduce locally:'
3015 print '$ ' + self.opts.command
3016 if bisect_utils.IsTelemetryCommand(self.opts.command):
3017 print
3018 print 'Also consider passing --profiler=list to see available profilers.'
  def _PrintOtherRegressions(self, other_regressions, revision_data):
    """Prints a table of other suspected regression ranges and confidences.

    Args:
      other_regressions: List of [current_id, previous_id, confidence]
          entries, as produced by _FindOtherRegressions.
      revision_data: Dict mapping revision -> data about that revision.
    """
    print
    print 'Other regressions may have occurred:'
    print ' %8s %70s %10s' % ('Depot'.center(8, ' '),
        'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
    for regression in other_regressions:
      current_id, previous_id, confidence = regression
      current_data = revision_data[current_id]
      previous_data = revision_data[previous_id]
      current_link = self._GetViewVCLinkFromDepotAndHash(current_id,
          current_data['depot'])
      previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id,
          previous_data['depot'])
      # If we can't map it to a viewable URL, at least show the original hash.
      if not current_link:
        current_link = current_id
      if not previous_link:
        previous_link = previous_id
      # Two rows per regression: the later (current) revision carries the
      # confidence value, the earlier (previous) revision is printed below it.
      print ' %8s %70s %s' % (
          current_data['depot'], current_link,
          ('%d%%' % confidence).center(10, ' '))
      print ' %8s %70s' % (
          previous_data['depot'], previous_link)
      print
3048 def _PrintStepTime(self, revision_data_sorted):
3049 step_perf_time_avg = 0.0
3050 step_build_time_avg = 0.0
3051 step_count = 0.0
3052 for _, current_data in revision_data_sorted:
3053 if current_data['value']:
3054 step_perf_time_avg += current_data['perf_time']
3055 step_build_time_avg += current_data['build_time']
3056 step_count += 1
3057 if step_count:
3058 step_perf_time_avg = step_perf_time_avg / step_count
3059 step_build_time_avg = step_build_time_avg / step_count
3060 print
3061 print 'Average build time : %s' % datetime.timedelta(
3062 seconds=int(step_build_time_avg))
3063 print 'Average test time : %s' % datetime.timedelta(
3064 seconds=int(step_perf_time_avg))
3066 def _PrintWarnings(self):
3067 if not self.warnings:
3068 return
3069 print
3070 print 'WARNINGS:'
3071 for w in set(self.warnings):
3072 print ' !!! %s' % w
3074 def _FindOtherRegressions(self, revision_data_sorted, bad_greater_than_good):
3075 other_regressions = []
3076 previous_values = []
3077 previous_id = None
3078 for current_id, current_data in revision_data_sorted:
3079 current_values = current_data['value']
3080 if current_values:
3081 current_values = current_values['values']
3082 if previous_values:
3083 confidence = CalculateConfidence(previous_values, [current_values])
3084 mean_of_prev_runs = CalculateMean(sum(previous_values, []))
3085 mean_of_current_runs = CalculateMean(current_values)
3087 # Check that the potential regression is in the same direction as
3088 # the overall regression. If the mean of the previous runs < the
3089 # mean of the current runs, this local regression is in same
3090 # direction.
3091 prev_less_than_current = mean_of_prev_runs < mean_of_current_runs
3092 is_same_direction = (prev_less_than_current if
3093 bad_greater_than_good else not prev_less_than_current)
3095 # Only report potential regressions with high confidence.
3096 if is_same_direction and confidence > 50:
3097 other_regressions.append([current_id, previous_id, confidence])
3098 previous_values.append(current_values)
3099 previous_id = current_id
3100 return other_regressions
  def _GetResultsDict(self, revision_data, revision_data_sorted):
    """Computes the final bisect results from the per-revision data.

    Args:
      revision_data: Dict mapping revision -> data about that revision.
      revision_data_sorted: The same data as sorted (revision, data) pairs.

    Returns:
      A dict with the suspected revision range, culprit CLs, estimated
      regression size/std-err, other candidate regressions, and a
      confidence score.
    """
    # Find range where it possibly broke.
    first_working_revision = None
    first_working_revision_index = -1
    last_broken_revision = None
    last_broken_revision_index = -1
    for i in xrange(len(revision_data_sorted)):
      k, v = revision_data_sorted[i]
      if v['passed'] == 1:
        if not first_working_revision:
          first_working_revision = k
          first_working_revision_index = i
      if not v['passed']:
        last_broken_revision = k
        last_broken_revision_index = i
    if last_broken_revision != None and first_working_revision != None:
      # Collect per-revision value lists on each side of the suspected range.
      broken_means = []
      for i in xrange(0, last_broken_revision_index + 1):
        if revision_data_sorted[i][1]['value']:
          broken_means.append(revision_data_sorted[i][1]['value']['values'])
      working_means = []
      for i in xrange(first_working_revision_index, len(revision_data_sorted)):
        if revision_data_sorted[i][1]['value']:
          working_means.append(revision_data_sorted[i][1]['value']['values'])
      # Flatten the lists to calculate mean of all values.
      working_mean = sum(working_means, [])
      broken_mean = sum(broken_means, [])
      # Calculate the approximate size of the regression
      mean_of_bad_runs = CalculateMean(broken_mean)
      mean_of_good_runs = CalculateMean(working_mean)
      # max(0.0001, ...) guards against division by (near-)zero means.
      regression_size = math.fabs(max(mean_of_good_runs, mean_of_bad_runs) /
          max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0 - 100.0
      regression_std_err = math.fabs(CalculatePooledStandardError(
          [working_mean, broken_mean]) /
          max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0
      # Give a "confidence" in the bisect. At the moment we use how distinct the
      # values are before and after the last broken revision, and how noisy the
      # overall graph is.
      confidence = CalculateConfidence(working_means, broken_means)
      culprit_revisions = []
      cwd = os.getcwd()
      self.ChangeToDepotWorkingDirectory(
          revision_data[last_broken_revision]['depot'])
      if revision_data[last_broken_revision]['depot'] == 'cros':
        # Want to get a list of all the commits and what depots they belong
        # to so that we can grab info about each.
        cmd = ['repo', 'forall', '-c',
            'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
            last_broken_revision, first_working_revision + 1)]
        (output, return_code) = RunProcessAndRetrieveOutput(cmd)
        changes = []
        assert not return_code, 'An error occurred while running'\
            ' "%s"' % ' '.join(cmd)
        last_depot = None
        cwd = os.getcwd()
        for l in output.split('\n'):
          if l:
            # Output will be in form:
            # /path_to_depot
            # /path_to_other_depot
            # <SHA1>
            # /path_again
            # <SHA1>
            # etc.
            if l[0] == '/':
              last_depot = l
            else:
              contents = l.split(' ')
              if len(contents) > 1:
                changes.append([last_depot, contents[0]])
        for c in changes:
          os.chdir(c[0])
          info = self.source_control.QueryRevisionInfo(c[1])
          culprit_revisions.append((c[1], info, None))
      else:
        # Walk from the last broken revision up to (but not including) the
        # first working revision, recording info for each culprit candidate.
        for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
          k, v = revision_data_sorted[i]
          if k == first_working_revision:
            break
          self.ChangeToDepotWorkingDirectory(v['depot'])
          info = self.source_control.QueryRevisionInfo(k)
          culprit_revisions.append((k, info, v['depot']))
      os.chdir(cwd)
      # Check for any other possible regression ranges
      other_regressions = self._FindOtherRegressions(revision_data_sorted,
          mean_of_bad_runs > mean_of_good_runs)
      # Check for warnings:
      if len(culprit_revisions) > 1:
        self.warnings.append('Due to build errors, regression range could '
            'not be narrowed down to a single commit.')
      if self.opts.repeat_test_count == 1:
        self.warnings.append('Tests were only set to run once. This may '
            'be insufficient to get meaningful results.')
      if confidence < 100:
        if confidence:
          self.warnings.append(
              'Confidence is less than 100%. There could be other candidates for '
              'this regression. Try bisecting again with increased repeat_count '
              'or on a sub-metric that shows the regression more clearly.')
        else:
          self.warnings.append(
              'Confidence is 0%. Try bisecting again on another platform, with '
              'increased repeat_count or on a sub-metric that shows the regression '
              'more clearly.')
      return {
          'first_working_revision': first_working_revision,
          'last_broken_revision': last_broken_revision,
          'culprit_revisions': culprit_revisions,
          'other_regressions': other_regressions,
          'regression_size': regression_size,
          'regression_std_err': regression_std_err,
          'confidence': confidence,
      }
    # NOTE(review): if no working/broken revision pair was found above, this
    # falls through and implicitly returns None -- callers appear to assume
    # both a passing and a failing revision always exist; confirm.
  def FormatAndPrintResults(self, bisect_results):
    """Prints the results from a bisection run in a readable format.

    Args:
      bisect_results: The results from a bisection test run.
    """
    revision_data = bisect_results['revision_data']
    revision_data_sorted = sorted(revision_data.iteritems(),
                                  key = lambda x: x[1]['sort'])
    results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')
    print
    print 'Full results of bisection:'
    for current_id, current_data in revision_data_sorted:
      build_status = current_data['passed']
      # 'passed' may also be a string ('Skipped', 'Build Failed'); only map
      # the boolean case to Good/Bad.
      if type(build_status) is bool:
        if build_status:
          build_status = 'Good'
        else:
          build_status = 'Bad'
      print ' %20s %40s %s' % (current_data['depot'],
          current_id, build_status)
    print
    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')
    if results_dict['culprit_revisions'] and results_dict['confidence']:
      self._PrintBanner(results_dict)
      for culprit in results_dict['culprit_revisions']:
        cl, info, depot = culprit
        self._PrintRevisionInfo(cl, info, depot)
      self._PrintReproSteps()
      if results_dict['other_regressions']:
        self._PrintOtherRegressions(results_dict['other_regressions'],
            revision_data)
    else:
      self._PrintFailedBanner(results_dict)
      self._PrintReproSteps()
    self._PrintTestedCommitsTable(revision_data_sorted,
        results_dict['first_working_revision'],
        results_dict['last_broken_revision'],
        results_dict['confidence'])
    self._PrintStepTime(revision_data_sorted)
    self._PrintWarnings()
    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
def DetermineAndCreateSourceControl(opts):
  """Attempts to determine the underlying source control workflow and returns
  a SourceControl object.

  Returns:
    An instance of a SourceControl object, or None if the current workflow
    is unsupported.
  """
  output, _ = RunGit(['rev-parse', '--is-inside-work-tree'])
  inside_git_work_tree = output.strip() == 'true'
  # Only the git workflow is currently supported.
  return GitSourceControl(opts) if inside_git_work_tree else None
def IsPlatformSupported(opts):
  """Checks that this platform and build system are supported.

  Args:
    opts: The options parsed from the command line.

  Returns:
    True if the platform and build system are supported.
  """
  # Haven't tested the script out on any other platforms yet.
  return os.name in ('posix', 'nt')
3323 def RmTreeAndMkDir(path_to_dir, skip_makedir=False):
3324 """Removes the directory tree specified, and then creates an empty
3325 directory in the same location (if not specified to skip).
3327 Args:
3328 path_to_dir: Path to the directory tree.
3329 skip_makedir: Whether to skip creating empty directory, default is False.
3331 Returns:
3332 True if successful, False if an error occurred.
3334 try:
3335 if os.path.exists(path_to_dir):
3336 shutil.rmtree(path_to_dir)
3337 except OSError, e:
3338 if e.errno != errno.ENOENT:
3339 return False
3341 if not skip_makedir:
3342 return MaybeMakeDirectory(path_to_dir)
3344 return True
def RemoveBuildFiles():
  """Removes build files from previous runs."""
  # Both trees must be wiped successfully; short-circuits on first failure,
  # matching the original nested-if behavior.
  out_ok = RmTreeAndMkDir(os.path.join('out', 'Release'))
  return bool(out_ok and RmTreeAndMkDir(os.path.join('build', 'Release')))
class BisectOptions(object):
  """Options to be used when running bisection."""
  def __init__(self):
    super(BisectOptions, self).__init__()

    # Defaults for every supported option. ParseCommandLine/FromDict
    # overwrite these with user-supplied values.
    self.target_platform = 'chromium'
    self.build_preference = None
    self.good_revision = None
    self.bad_revision = None
    self.use_goma = None
    self.cros_board = None
    self.cros_remote_ip = None
    self.repeat_test_count = 20
    self.truncate_percent = 25
    self.max_time_minutes = 20
    self.metric = None
    self.command = None
    self.output_buildbot_annotations = None
    self.no_custom_deps = False
    self.working_directory = None
    self.extra_src = None
    self.debug_ignore_build = None
    self.debug_ignore_sync = None
    self.debug_ignore_perf_test = None
    self.gs_bucket = None
    self.target_arch = 'ia32'
    self.builder_host = None
    self.builder_port = None

  def _CreateCommandLineParser(self):
    """Creates a parser with bisect options.

    Returns:
      An instance of optparse.OptionParser.
    """
    usage = ('%prog [options] [-- chromium-options]\n'
             'Perform binary search on revision history to find a minimal '
             'range of revisions where a performance metric regressed.\n')

    parser = optparse.OptionParser(usage=usage)

    group = optparse.OptionGroup(parser, 'Bisect options')
    group.add_option('-c', '--command',
                     type='str',
                     help='A command to execute your performance test at' +
                     ' each point in the bisection.')
    group.add_option('-b', '--bad_revision',
                     type='str',
                     help='A bad revision to start bisection. ' +
                     'Must be later than good revision. May be either a git' +
                     ' or svn revision.')
    group.add_option('-g', '--good_revision',
                     type='str',
                     help='A revision to start bisection where performance' +
                     ' test is known to pass. Must be earlier than the ' +
                     'bad revision. May be either a git or svn revision.')
    group.add_option('-m', '--metric',
                     type='str',
                     help='The desired metric to bisect on. For example ' +
                     '"vm_rss_final_b/vm_rss_f_b"')
    group.add_option('-r', '--repeat_test_count',
                     type='int',
                     default=20,
                     help='The number of times to repeat the performance '
                     'test. Values will be clamped to range [1, 100]. '
                     'Default value is 20.')
    group.add_option('--max_time_minutes',
                     type='int',
                     default=20,
                     help='The maximum time (in minutes) to take running the '
                     'performance tests. The script will run the performance '
                     'tests according to --repeat_test_count, so long as it '
                     'doesn\'t exceed --max_time_minutes. Values will be '
                     'clamped to range [1, 60].'
                     'Default value is 20.')
    group.add_option('-t', '--truncate_percent',
                     type='int',
                     default=25,
                     help='The highest/lowest % are discarded to form a '
                     'truncated mean. Values will be clamped to range [0, '
                     '25]. Default value is 25 (highest/lowest 25% will be '
                     'discarded).')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Build options')
    group.add_option('-w', '--working_directory',
                     type='str',
                     help='Path to the working directory where the script '
                     'will do an initial checkout of the chromium depot. The '
                     'files will be placed in a subdirectory "bisect" under '
                     'working_directory and that will be used to perform the '
                     'bisection. This parameter is optional, if it is not '
                     'supplied, the script will work from the current depot.')
    group.add_option('--build_preference',
                     type='choice',
                     choices=['msvs', 'ninja', 'make'],
                     help='The preferred build system to use. On linux/mac '
                     'the options are make/ninja. On Windows, the options '
                     'are msvs/ninja.')
    group.add_option('--target_platform',
                     type='choice',
                     choices=['chromium', 'cros', 'android', 'android-chrome'],
                     default='chromium',
                     help='The target platform. Choices are "chromium" '
                     '(current platform), "cros", or "android". If you '
                     'specify something other than "chromium", you must be '
                     'properly set up to build that platform.')
    group.add_option('--no_custom_deps',
                     dest='no_custom_deps',
                     action="store_true",
                     default=False,
                     help='Run the script with custom_deps or not.')
    group.add_option('--extra_src',
                     type='str',
                     help='Path to a script which can be used to modify '
                     'the bisect script\'s behavior.')
    group.add_option('--cros_board',
                     type='str',
                     help='The cros board type to build.')
    group.add_option('--cros_remote_ip',
                     type='str',
                     help='The remote machine to image to.')
    group.add_option('--use_goma',
                     action="store_true",
                     help='Add a bunch of extra threads for goma.')
    group.add_option('--output_buildbot_annotations',
                     action="store_true",
                     help='Add extra annotation output for buildbot.')
    group.add_option('--gs_bucket',
                     default='',
                     dest='gs_bucket',
                     type='str',
                     help=('Name of Google Storage bucket to upload or '
                     'download build. e.g., chrome-perf'))
    group.add_option('--target_arch',
                     type='choice',
                     choices=['ia32', 'x64', 'arm'],
                     default='ia32',
                     dest='target_arch',
                     help=('The target build architecture. Choices are "ia32" '
                     '(default), "x64" or "arm".'))
    group.add_option('--builder_host',
                     dest='builder_host',
                     type='str',
                     help=('Host address of server to produce build by posting'
                           ' try job request.'))
    group.add_option('--builder_port',
                     dest='builder_port',
                     type='int',
                     help=('HTTP port of the server to produce build by posting'
                           ' try job request.'))
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Debug options')
    group.add_option('--debug_ignore_build',
                     action="store_true",
                     help='DEBUG: Don\'t perform builds.')
    group.add_option('--debug_ignore_sync',
                     action="store_true",
                     help='DEBUG: Don\'t perform syncs.')
    group.add_option('--debug_ignore_perf_test',
                     action="store_true",
                     help='DEBUG: Don\'t perform performance tests.')
    parser.add_option_group(group)
    return parser

  @staticmethod
  def _ValidateAndConvertCommonOptions(opts):
    """Splits the metric string and clamps numeric options into legal ranges.

    Shared between ParseCommandLine and FromDict so both entry points apply
    identical normalization.

    Args:
      opts: An object with metric/repeat_test_count/max_time_minutes/
          truncate_percent attributes, modified in place.

    Raises:
      RuntimeError: If the metric is not of the form chart_name/trace_name.
    """
    metric_values = opts.metric.split('/')
    if len(metric_values) != 2:
      raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)

    opts.metric = metric_values
    opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
    opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
    opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
    # Stored as a fraction (e.g. 25 -> 0.25) for downstream arithmetic.
    opts.truncate_percent = opts.truncate_percent / 100.0

  def ParseCommandLine(self):
    """Parses the command line for bisect options."""
    parser = self._CreateCommandLineParser()
    (opts, _) = parser.parse_args()

    try:
      if not opts.command:
        raise RuntimeError('missing required parameter: --command')

      if not opts.good_revision:
        raise RuntimeError('missing required parameter: --good_revision')

      if not opts.bad_revision:
        raise RuntimeError('missing required parameter: --bad_revision')

      if not opts.metric:
        raise RuntimeError('missing required parameter: --metric')

      if opts.gs_bucket:
        if not cloud_storage.List(opts.gs_bucket):
          raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket)
        if not opts.builder_host:
          raise RuntimeError('Must specify try server hostname, when '
                             'gs_bucket is used: --builder_host')
        if not opts.builder_port:
          raise RuntimeError('Must specify try server port number, when '
                             'gs_bucket is used: --builder_port')
      if opts.target_platform == 'cros':
        # Run sudo up front to make sure credentials are cached for later.
        print('Sudo is required to build cros:')
        print('')
        RunProcess(['sudo', 'true'])

        if not opts.cros_board:
          raise RuntimeError('missing required parameter: --cros_board')

        if not opts.cros_remote_ip:
          raise RuntimeError('missing required parameter: --cros_remote_ip')

        if not opts.working_directory:
          raise RuntimeError('missing required parameter: --working_directory')

      self._ValidateAndConvertCommonOptions(opts)

      # Copy every parsed value onto this instance.
      for k, v in opts.__dict__.items():
        assert hasattr(self, k), "Invalid %s attribute in BisectOptions." % k
        setattr(self, k, v)
    except RuntimeError as e:
      # Re-raise with the full usage text appended so the caller can show it.
      output_string = StringIO.StringIO()
      parser.print_help(file=output_string)
      error_message = '%s\n\n%s' % (str(e), output_string.getvalue())
      output_string.close()
      raise RuntimeError(error_message)

  @staticmethod
  def FromDict(values):
    """Creates an instance of BisectOptions with the values parsed from a
    .cfg file.

    Args:
      values: a dict containing options to set.

    Returns:
      An instance of BisectOptions.
    """
    opts = BisectOptions()
    for k, v in values.items():
      assert hasattr(opts, k), 'Invalid %s attribute in '\
          'BisectOptions.' % k
      setattr(opts, k, v)

    BisectOptions._ValidateAndConvertCommonOptions(opts)

    return opts
def main():
  """Entry point: parses options, sets up the checkout, and runs the bisect.

  Returns:
    0 on success, 1 if any step raised a RuntimeError.
  """
  try:
    opts = BisectOptions()
    opts.ParseCommandLine()

    if opts.extra_src:
      # An extra_src script can inject additional depots into the bisect.
      extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
      if not extra_src:
        raise RuntimeError("Invalid or missing --extra_src.")
      _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())

    if opts.working_directory:
      custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
      if opts.no_custom_deps:
        custom_deps = None
      bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)

      os.chdir(os.path.join(os.getcwd(), 'src'))

      if not RemoveBuildFiles():
        raise RuntimeError('Something went wrong removing the build files.')

    if not IsPlatformSupported(opts):
      raise RuntimeError("Sorry, this platform isn't supported yet.")

    # Check what source control method they're using. Only support git workflow
    # at the moment.
    source_control = DetermineAndCreateSourceControl(opts)

    if not source_control:
      raise RuntimeError("Sorry, only the git workflow is supported at the "
          "moment.")

    # gClient sync seems to fail if you're not in master branch.
    if (not source_control.IsInProperBranch() and
        not opts.debug_ignore_sync and
        not opts.working_directory):
      raise RuntimeError("You must switch to master branch to run bisection.")
    bisect_test = BisectPerformanceMetrics(source_control, opts)
    try:
      bisect_results = bisect_test.Run(opts.command,
                                       opts.bad_revision,
                                       opts.good_revision,
                                       opts.metric)
      if bisect_results['error']:
        raise RuntimeError(bisect_results['error'])
      bisect_test.FormatAndPrintResults(bisect_results)
      return 0
    finally:
      # Always clean up temporary bisect state, even on failure.
      bisect_test.PerformCleanup()
  except RuntimeError, e:
    if opts.output_buildbot_annotations:
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')
    print 'Error: %s' % e.message
    if opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
    return 1
# Script entry point; propagates main()'s return value as the exit code.
if __name__ == '__main__':
  sys.exit(main())