android: disable NLS when building yasm.
[chromium-blink-merge.git] / tools / bisect-perf-regression.py
blob36ef1fa9c35b990812d8085a4264e028b7681753
1 #!/usr/bin/env python
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
13 further bisect changes to those depots and attempt to narrow down the revision
14 range.
17 An example usage (using svn cl's):
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
28 An example usage (using git hashes):
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
36 """
38 import copy
39 import datetime
40 import errno
41 import imp
42 import math
43 import optparse
44 import os
45 import re
46 import shlex
47 import shutil
48 import StringIO
49 import subprocess
50 import sys
51 import time
53 import bisect_utils
# The additional repositories that might need to be bisected.
# If the repository has any dependent repositories (such as skia/src needs
# skia/include and skia/gyp to be updated), specify them in the 'depends'
# so that they're synced appropriately.
# Format is:
# src: path to the working directory.
# recurse: True if this repository will get bisected.
# depends: A list of other repositories that are actually part of the same
# repository in svn.
# svn: Needed for git workflow to resolve hashes to svn revisions.
# from: Parent depot that must be bisected before this is bisected.
DEPOT_DEPS_NAME = {
  'chromium' : {
    "src" : "src",
    "recurse" : True,
    "depends" : None,
    "from" : ['cros', 'android-chrome'],
    'viewvc': 'http://src.chromium.org/viewvc/chrome?view=revision&revision='
  },
  'webkit' : {
    "src" : "src/third_party/WebKit",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'http://src.chromium.org/viewvc/blink?view=revision&revision='
  },
  'angle' : {
    "src" : "src/third_party/angle",
    "src_old" : "src/third_party/angle_dx11",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    # Only bisected when running on Windows.
    "platform": 'nt',
  },
  'v8' : {
    "src" : "src/v8",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    "custom_deps": bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
  },
  'v8_bleeding_edge' : {
    "src" : "src/v8_bleeding_edge",
    "recurse" : True,
    "depends" : None,
    "svn": "https://v8.googlecode.com/svn/branches/bleeding_edge",
    "from" : ['v8'],
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
  },
  'skia/src' : {
    "src" : "src/third_party/skia/src",
    "recurse" : True,
    "svn" : "http://skia.googlecode.com/svn/trunk/src",
    # skia is split across three working directories that must stay in sync.
    "depends" : ['skia/include', 'skia/gyp'],
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
  },
  'skia/include' : {
    "src" : "src/third_party/skia/include",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/include",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
  },
  'skia/gyp' : {
    "src" : "src/third_party/skia/gyp",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/gyp",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
  },
}

DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
# Path to the cros_sdk wrapper used to enter the ChromeOS chroot, relative to
# chromium's 'src' directory (the script's working directory).
CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
# Log-message pattern used to find the commit that bumped the ChromeOS Chrome
# version; '%s' is filled in with the version being searched for.
CROS_VERSION_PATTERN = 'new version number from %s'
# Portage package name for Chrome on ChromeOS.
CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
# SSH test keys used to reach a ChromeOS test device.
CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
                                  'testing_rsa')
CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
                                    'mod_for_test_scripts', 'ssh_keys',
                                    'testing_rsa')

# Outcome codes for a single build attempt.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2
def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables."""
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  # Rebind (rather than mutate) so that the merge is atomic from the
  # perspective of readers holding the old dict.
  combined_items = DEPOT_DEPS_NAME.items() + depot_info.items()
  DEPOT_DEPS_NAME = dict(combined_items)
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
def CalculateTruncatedMean(data_set, truncate_percent):
  """Calculates the truncated mean of a set of values.

  Args:
    data_set: Set of values to use in calculation.
    truncate_percent: The % from the upper/lower portions of the data set to
        discard, expressed as a value in [0, 1].

  Returns:
    The truncated mean as a float.
  """
  if len(data_set) > 2:
    data_set = sorted(data_set)

    discard_num_float = len(data_set) * truncate_percent
    discard_num_int = int(math.floor(discard_num_float))
    kept_weight = len(data_set) - discard_num_float * 2

    data_set = data_set[discard_num_int:len(data_set)-discard_num_int]

    weight_left = 1.0 - (discard_num_float - discard_num_int)

    if weight_left < 1:
      # If the % to discard leaves a fractional portion, need to weight those
      # values.
      unweighted_vals = data_set[1:len(data_set)-1]
      weighted_vals = [data_set[0], data_set[len(data_set)-1]]
      weighted_vals = [w * weight_left for w in weighted_vals]
      data_set = weighted_vals + unweighted_vals
  else:
    kept_weight = len(data_set)

  # Left-fold float addition; same result as the old reduce()-with-lambda but
  # idiomatic and portable across Python 2/3.
  truncated_mean = sum(float(x) for x in data_set) / kept_weight

  return truncated_mean
def CalculateStandardDeviation(v):
  """Returns the sample standard deviation of v (0.0 for a single value)."""
  if len(v) == 1:
    return 0.0

  mean = CalculateTruncatedMean(v, 0.0)
  # Sample variance: sum of squared deviations over (n - 1).
  sum_of_squares = sum((float(x) - mean) ** 2 for x in v)
  variance = sum_of_squares / (len(v) - 1)
  return math.sqrt(variance)
def CalculatePooledStandardError(work_sets):
  """Returns the pooled standard error across the given sample sets."""
  sum_weighted_variance = 0.0
  total_dof = 0.0
  inverse_size_sum = 0.0

  for sample in work_sets:
    deviation = CalculateStandardDeviation(sample)
    # Degrees of freedom for this sample.
    dof = len(sample) - 1
    sum_weighted_variance += dof * deviation ** 2
    total_dof += dof
    inverse_size_sum += 1.0 / len(sample)

  if not total_dof:
    return 0.0
  return math.sqrt(sum_weighted_variance / total_dof) * math.sqrt(
      inverse_size_sum)
def CalculateStandardError(v):
  """Returns the standard error of the mean of v (0.0 for < 2 samples)."""
  if len(v) <= 1:
    return 0.0
  return CalculateStandardDeviation(v) / math.sqrt(len(v))
def IsStringFloat(string_to_check):
  """Checks whether or not the given string can be converted to a floating
  point number.

  Args:
    string_to_check: Input string to check if it can be converted to a float.

  Returns:
    True if the string can be converted to a float.
  """
  try:
    float(string_to_check)
  except ValueError:
    return False
  return True
def IsStringInt(string_to_check):
  """Checks whether or not the given string can be converted to an integer.

  Args:
    string_to_check: Input string to check if it can be converted to an int.

  Returns:
    True if the string can be converted to an int.
  """
  try:
    int(string_to_check)
  except ValueError:
    return False
  return True
def IsWindows():
  """Checks whether or not the script is running on Windows.

  Returns:
    True if running on Windows.
  """
  # os.name is 'nt' only on native Windows CPython.
  return 'nt' == os.name
def RunProcess(command):
  """Runs an arbitrary command. If output from the call is needed, use
  RunProcessAndRetrieveOutput instead.

  Args:
    command: A list containing the command and args to execute.

  Returns:
    The return code of the call.
  """
  # On Windows, shell=True is required so PATH is consulted when resolving
  # the executable.
  return subprocess.call(command, shell=IsWindows())
def RunProcessAndRetrieveOutput(command, cwd=None):
  """Runs an arbitrary command, returning its output and return code. Since
  output is collected via communicate(), there will be no output until the
  call terminates. If you need output while the program runs (ie. so
  that the buildbot doesn't terminate the script), consider RunProcess().

  Args:
    command: A list containing the command and args to execute.
    cwd: Directory to run the command from, if any.

  Returns:
    A tuple of the output and return code.
  """
  # On Windows, shell=True is required so PATH is consulted.
  proc = subprocess.Popen(command, shell=IsWindows(),
                          stdout=subprocess.PIPE, cwd=cwd)
  output = proc.communicate()[0]
  return (output, proc.returncode)
def RunGit(command, cwd=None):
  """Runs a git subcommand, returning its output and return code.

  Args:
    command: A list containing the args to git.
    cwd: Directory to run git from, if any.

  Returns:
    A tuple of the output and return code.
  """
  return RunProcessAndRetrieveOutput(['git'] + command, cwd=cwd)
def CheckRunGit(command, cwd=None):
  """Runs a git subcommand, asserting that the call succeeded.

  Args:
    command: A list containing the args to git.
    cwd: Directory to run git from, if any.

  Returns:
    The output of the git command.
  """
  (output, return_code) = RunGit(command, cwd=cwd)

  assert not return_code, 'An error occurred while running'\
                          ' "git %s"' % ' '.join(command)
  return output
def SetBuildSystemDefault(build_system):
  """Sets up any environment variables needed to build with the specified build
  system.

  Args:
    build_system: A string specifying build system. Currently only 'ninja' or
        'make' are supported.

  Raises:
    RuntimeError: An unsupported build system was requested.
  """
  if build_system == 'make':
    os.environ['GYP_GENERATORS'] = 'make'
    return

  if build_system != 'ninja':
    raise RuntimeError('%s build not supported.' % build_system)

  gyp_var = os.getenv('GYP_GENERATORS')
  if gyp_var and 'ninja' in gyp_var:
    # Ninja is already configured; nothing to change.
    return

  os.environ['GYP_GENERATORS'] = (gyp_var + ',ninja') if gyp_var else 'ninja'

  if IsWindows():
    os.environ['GYP_DEFINES'] = 'component=shared_library '\
        'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
        'chromium_win_pch=0'
def BuildWithMake(threads, targets):
  """Runs a Release-mode make build of the given targets.

  Returns:
    True if the build succeeded.
  """
  cmd = ['make', 'BUILDTYPE=Release']
  if threads:
    cmd.append('-j%d' % threads)
  return not RunProcess(cmd + targets)
def BuildWithNinja(threads, targets):
  """Runs a ninja build of the given targets in out/Release.

  Returns:
    True if the build succeeded.
  """
  cmd = ['ninja', '-C', os.path.join('out', 'Release')]
  if threads:
    cmd.append('-j%d' % threads)
  return not RunProcess(cmd + targets)
def BuildWithVisualStudio(targets):
  """Builds the given targets with devenv.com (located via VS100COMNTOOLS).

  Returns:
    True if the build succeeded.
  """
  path_to_devenv = os.path.abspath(
      os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
  path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
  cmd = [path_to_devenv, '/build', 'Release', path_to_sln]
  for target in targets:
    cmd.extend(['/Project', target])
  return not RunProcess(cmd)
class Builder(object):
  """Builder is used by the bisect script to build relevant targets and deploy.
  """
  def __init__(self, opts):
    """Performs setup for building with target build system.

    Args:
      opts: Options parsed from command line.

    Raises:
      RuntimeError: The build system or platform setup is unusable.
    """
    if IsWindows():
      if not opts.build_preference:
        opts.build_preference = 'msvs'

      if opts.build_preference == 'msvs':
        if not os.getenv('VS100COMNTOOLS'):
          raise RuntimeError(
              'Path to visual studio could not be determined.')
      else:
        SetBuildSystemDefault(opts.build_preference)
    else:
      if not opts.build_preference:
        # BUG FIX: os.getenv returns None when GYP_GENERATORS is unset, which
        # made the 'in' test below raise TypeError; default to ''.
        if 'ninja' in os.getenv('GYP_GENERATORS', ''):
          opts.build_preference = 'ninja'
        else:
          opts.build_preference = 'make'

      SetBuildSystemDefault(opts.build_preference)

    if not bisect_utils.SetupPlatformBuildEnvironment(opts):
      raise RuntimeError('Failed to set platform environment.')

    bisect_utils.RunGClient(['runhooks'])

  @staticmethod
  def FromOpts(opts):
    """Constructs and returns the Builder subclass for opts.target_platform."""
    builder = None
    if opts.target_platform == 'cros':
      builder = CrosBuilder(opts)
    elif opts.target_platform == 'android':
      builder = AndroidBuilder(opts)
    elif opts.target_platform == 'android-chrome':
      builder = AndroidChromeBuilder(opts)
    else:
      builder = DesktopBuilder(opts)
    return builder

  def Build(self, depot, opts):
    # Subclasses must implement the actual build step.
    raise NotImplementedError()
class DesktopBuilder(Builder):
  """DesktopBuilder is used to build Chromium on linux/mac/windows."""
  def __init__(self, opts):
    super(DesktopBuilder, self).__init__(opts)

  def Build(self, depot, opts):
    """Builds chromium_builder_perf target using options passed into
    the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    targets = ['chromium_builder_perf']

    # Goma distributes compilation, so far more parallel jobs pay off.
    threads = 64 if opts.use_goma else None

    if opts.build_preference == 'make':
      return BuildWithMake(threads, targets)
    if opts.build_preference == 'ninja':
      return BuildWithNinja(threads, targets)
    if opts.build_preference == 'msvs':
      assert IsWindows(), 'msvs is only supported on Windows.'
      return BuildWithVisualStudio(targets)
    assert False, 'No build system defined.'
class AndroidBuilder(Builder):
  """AndroidBuilder is used to build on android."""
  def __init__(self, opts):
    super(AndroidBuilder, self).__init__(opts)

  def _GetTargets(self):
    # Targets needed to run android perf bisects.
    return ['chromium_testshell', 'cc_perftests_apk', 'android_tools']

  def Build(self, depot, opts):
    """Builds the android content shell and other necessary tools using options
    passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    threads = 64 if opts.use_goma else None

    # Android builds only support ninja.
    assert opts.build_preference == 'ninja', 'No build system defined.'
    return BuildWithNinja(threads, self._GetTargets())
class AndroidChromeBuilder(AndroidBuilder):
  """AndroidChromeBuilder is used to build "android-chrome" (Chrome for
  android)."""
  def __init__(self, opts):
    super(AndroidChromeBuilder, self).__init__(opts)

  def _GetTargets(self):
    # Chrome for android needs chrome_apk on top of the base android targets.
    return super(AndroidChromeBuilder, self)._GetTargets() + ['chrome_apk']
class CrosBuilder(Builder):
  """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
  target platform."""
  def __init__(self, opts):
    super(CrosBuilder, self).__init__(opts)

  def ImageToTarget(self, opts):
    """Installs latest image to target specified by opts.cros_remote_ip.

    Args:
      opts: Program options containing cros_board and cros_remote_ip.

    Returns:
      True if successful.
    """
    try:
      # Keys will most likely be set to 0640 after wiping the chroot.
      os.chmod(CROS_SCRIPT_KEY_PATH, 0o600)
      os.chmod(CROS_TEST_KEY_PATH, 0o600)
      cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
             '--remote=%s' % opts.cros_remote_ip,
             '--board=%s' % opts.cros_board, '--test', '--verbose']
      return not RunProcess(cmd)
    except OSError:
      # Keys missing or chroot not set up; imaging cannot proceed.
      return False

  def _CrosSdkPrefix(self, depot):
    """Returns the cros_sdk invocation prefix shared by the package and image
    build steps (adds chrome_root/CHROME_ORIGIN when bisecting chrome)."""
    cmd = [CROS_SDK_PATH]
    if depot != 'cros':
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]
    cmd += ['--']
    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']
    return cmd

  def BuildPackages(self, opts, depot):
    """Builds packages for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = self._CrosSdkPrefix(depot) + ['BUILDTYPE=Release',
        './build_packages', '--board=%s' % opts.cros_board]
    return not RunProcess(cmd)

  def BuildImage(self, opts, depot):
    """Builds test image for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = self._CrosSdkPrefix(depot) + ['BUILDTYPE=Release', '--',
        './build_image', '--board=%s' % opts.cros_board, 'test']
    return not RunProcess(cmd)

  def Build(self, depot, opts):
    """Builds packages, the test image, and deploys it to the remote device.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    return (self.BuildPackages(opts, depot) and
            self.BuildImage(opts, depot) and
            self.ImageToTarget(opts))
class SourceControl(object):
  """SourceControl is an abstraction over the underlying source control
  system used for chromium. For now only git is supported, but in the
  future, the svn workflow could be added as well."""
  def __init__(self):
    super(SourceControl, self).__init__()

  def SyncToRevisionWithGClient(self, revision):
    """Uses gclient to sync to the specified revision.

    ie. gclient sync --revision <revision>

    Args:
      revision: The git SHA1 or svn CL (depending on workflow).

    Returns:
      The return code of the call.
    """
    args = ['sync', '--revision', revision,
            '--verbose', '--nohooks', '--reset', '--force']
    return bisect_utils.RunGClient(args)

  def SyncToRevisionWithRepo(self, timestamp):
    """Uses repo to sync all the underlying git depots to the specified
    time.

    Args:
      timestamp: The unix timestamp to sync to.

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunRepoSyncAtTimestamp(timestamp)
class GitSourceControl(SourceControl):
  """GitSourceControl is used to query the underlying source control. """
  def __init__(self, opts):
    super(GitSourceControl, self).__init__()
    # Parsed command-line options, kept for later use.
    self.opts = opts

  def IsGit(self):
    """Returns True: this SourceControl implementation is git-based."""
    return True

  def GetRevisionList(self, revision_range_end, revision_range_start, cwd=None):
    """Retrieves a list of revisions between |revision_range_start| and
    |revision_range_end|.

    Args:
      revision_range_end: The SHA1 for the end of the range.
      revision_range_start: The SHA1 for the beginning of the range.
      cwd: Directory to run git from, if any.

    Returns:
      A list of the revisions between |revision_range_start| and
      |revision_range_end| (inclusive).
    """
    revision_range = '%s..%s' % (revision_range_start, revision_range_end)
    cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
    log_output = CheckRunGit(cmd, cwd=cwd)

    revision_hash_list = log_output.split()
    # 'start..end' excludes the start commit, so append it to make the
    # returned range inclusive.
    revision_hash_list.append(revision_range_start)

    return revision_hash_list

  def SyncToRevision(self, revision, sync_client=None):
    """Syncs to the specified revision.

    Args:
      revision: The revision to sync to.
      sync_client: None to check out directly with git, or 'gclient'/'repo'
          to sync with the corresponding tool.

    Returns:
      True if successful.
    """

    if not sync_client:
      results = RunGit(['checkout', revision])[1]
    elif sync_client == 'gclient':
      results = self.SyncToRevisionWithGClient(revision)
    elif sync_client == 'repo':
      results = self.SyncToRevisionWithRepo(revision)
    # NOTE(review): an unrecognized sync_client value leaves |results|
    # unbound and raises NameError on the next line — confirm callers only
    # ever pass None/'gclient'/'repo'.

    return not results

  def ResolveToRevision(self, revision_to_check, depot, search, cwd=None):
    """If an SVN revision is supplied, try to resolve it to a git SHA1.

    Args:
      revision_to_check: The user supplied revision string that may need to be
          resolved to a git SHA1.
      depot: The depot the revision_to_check is from.
      search: The number of changelists to try if the first fails to resolve
          to a git hash. If the value is negative, the function will search
          backwards chronologically, otherwise it will search forward.
      cwd: Directory to run git from, if any.

    Returns:
      A string containing a git SHA1 hash, otherwise None.
    """
    # Android-chrome is git only, so no need to resolve this to anything else.
    if depot == 'android-chrome':
      return revision_to_check

    if depot != 'cros':
      if not IsStringInt(revision_to_check):
        # Not an SVN revision number; assume it is already a git hash.
        return revision_to_check

      depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'

      if depot != 'chromium':
        depot_svn = DEPOT_DEPS_NAME[depot]['svn']

      svn_revision = int(revision_to_check)
      git_revision = None

      # Scan up to |search| revisions away from the requested one, in the
      # direction given by the sign of |search|.
      if search > 0:
        search_range = xrange(svn_revision, svn_revision + search, 1)
      else:
        search_range = xrange(svn_revision, svn_revision + search, -1)

      for i in search_range:
        # git-svn embeds the source SVN URL@revision in each commit message;
        # grep for it to map the SVN revision onto a git commit.
        svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
        cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
            'origin/master']

        (log_output, return_code) = RunGit(cmd, cwd=cwd)

        assert not return_code, 'An error occurred while running'\
                                ' "git %s"' % ' '.join(cmd)

        if not return_code:
          log_output = log_output.strip()

          if log_output:
            git_revision = log_output

            break

      return git_revision
    else:
      if IsStringInt(revision_to_check):
        # cros bisects on unix timestamps, which are usable as-is.
        return int(revision_to_check)
      else:
        # Map a Chrome version string to the commit timestamp of the version
        # bump in the chromiumos overlay.
        cwd = os.getcwd()
        os.chdir(os.path.join(os.getcwd(), 'src', 'third_party',
            'chromiumos-overlay'))
        pattern = CROS_VERSION_PATTERN % revision_to_check
        cmd = ['log', '--format=%ct', '-1', '--grep', pattern]

        git_revision = None

        log_output = CheckRunGit(cmd, cwd=cwd)
        if log_output:
          git_revision = log_output
          git_revision = int(log_output.strip())
        os.chdir(cwd)

        return git_revision

  def IsInProperBranch(self):
    """Confirms they're in the master branch for performing the bisection.
    This is needed or gclient will fail to sync properly.

    Returns:
      True if the current branch on src is 'master'.
    """
    cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
    log_output = CheckRunGit(cmd)
    log_output = log_output.strip()

    return log_output == "master"

  def SVNFindRev(self, revision):
    """Maps directly to the 'git svn find-rev' command.

    Args:
      revision: The git SHA1 to use.

    Returns:
      An integer changelist #, otherwise None.
    """

    cmd = ['svn', 'find-rev', revision]

    output = CheckRunGit(cmd)
    svn_revision = output.strip()

    if IsStringInt(svn_revision):
      return int(svn_revision)

    return None

  def QueryRevisionInfo(self, revision, cwd=None):
    """Gathers information on a particular revision, such as author's name,
    email, subject, and date.

    Args:
      revision: Revision you want to gather information on.
      cwd: Directory to run git from, if any.

    Returns:
      A dict in the following format:
      {
        'author': %s,
        'email': %s,
        'date': %s,
        'subject': %s,
        'body': %s,
      }
    """
    commit_info = {}

    # One 'git log' pretty-format specifier per collected field.
    # NOTE(review): 'author' is filled from %cN and 'date' from %cD, which
    # are the *committer* name/date rather than the author's — confirm this
    # is intentional.
    formats = ['%cN', '%cE', '%s', '%cD', '%b']
    targets = ['author', 'email', 'subject', 'date', 'body']

    for i in xrange(len(formats)):
      cmd = ['log', '--format=%s' % formats[i], '-1', revision]
      output = CheckRunGit(cmd, cwd=cwd)
      commit_info[targets[i]] = output.rstrip()

    return commit_info

  def CheckoutFileAtRevision(self, file_name, revision, cwd=None):
    """Performs a checkout on a file at the given revision.

    Returns:
      True if successful.
    """
    return not RunGit(['checkout', revision, file_name], cwd=cwd)[1]

  def RevertFileToHead(self, file_name):
    """Unstages a file and returns it to HEAD.

    Returns:
      True if successful.
    """
    # Reset doesn't seem to return 0 on success.
    # NOTE(review): |file_name| is ignored; both commands operate on
    # bisect_utils.FILE_DEPS_GIT — confirm whether that is intended.
    RunGit(['reset', 'HEAD', bisect_utils.FILE_DEPS_GIT])

    return not RunGit(['checkout', bisect_utils.FILE_DEPS_GIT])[1]

  def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
    """Returns a list of commits that modified this file.

    Args:
      filename: Name of file.
      revision_start: Start of revision range.
      revision_end: End of revision range.

    Returns:
      Returns a list of commits that touched this file.
    """
    # '~1' widens the range so the commit at revision_start is included.
    cmd = ['log', '--format=%H', '%s~1..%s' % (revision_start, revision_end),
        filename]
    output = CheckRunGit(cmd)

    return [o for o in output.split('\n') if o]
890 class BisectPerformanceMetrics(object):
891 """BisectPerformanceMetrics performs a bisection against a list of range
892 of revisions to narrow down where performance regressions may have
893 occurred."""
  def __init__(self, source_control, opts):
    super(BisectPerformanceMetrics, self).__init__()

    self.opts = opts
    self.source_control = source_control
    # The script is expected to be launched from chromium's 'src' directory.
    self.src_cwd = os.getcwd()
    self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
    # Maps depot name -> absolute working directory for that depot.
    self.depot_cwd = {}
    # Deferred undo actions, replayed by PerformCleanup().
    self.cleanup_commands = []
    # Human-readable warnings accumulated during the bisect.
    self.warnings = []
    self.builder = Builder.FromOpts(opts)

    # This always starts true since the script grabs latest first.
    self.was_blink = True

    for d in DEPOT_NAMES:
      # The working directory of each depot is just the path to the depot, but
      # since we're already in 'src', we can skip that part.

      self.depot_cwd[d] = os.path.join(
          self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
917 def PerformCleanup(self):
918 """Performs cleanup when script is finished."""
919 os.chdir(self.src_cwd)
920 for c in self.cleanup_commands:
921 if c[0] == 'mv':
922 shutil.move(c[1], c[2])
923 else:
924 assert False, 'Invalid cleanup command.'
  def GetRevisionList(self, depot, bad_revision, good_revision):
    """Retrieves a list of all the commits between the bad revision and
    last known good revision."""

    revision_work_list = []

    if depot == 'cros':
      # For cros, revisions are unix timestamps rather than git hashes.
      revision_range_start = good_revision
      revision_range_end = bad_revision

      cwd = os.getcwd()
      self.ChangeToDepotWorkingDirectory('cros')

      # Print the commit timestamps for every commit in the revision time
      # range. We'll sort them and bisect by that. There is a remote chance that
      # 2 (or more) commits will share the exact same timestamp, but it's
      # probably safe to ignore that case.
      cmd = ['repo', 'forall', '-c',
          'git log --format=%%ct --before=%d --after=%d' % (
          revision_range_end, revision_range_start)]
      (output, return_code) = RunProcessAndRetrieveOutput(cmd)

      assert not return_code, 'An error occurred while running'\
                              ' "%s"' % ' '.join(cmd)

      os.chdir(cwd)

      # Deduplicate the timestamps, then bisect over them newest-first.
      revision_work_list = list(set(
          [int(o) for o in output.split('\n') if IsStringInt(o)]))
      revision_work_list = sorted(revision_work_list, reverse=True)
    else:
      cwd = self._GetDepotDirectory(depot)
      revision_work_list = self.source_control.GetRevisionList(bad_revision,
          good_revision, cwd=cwd)

    return revision_work_list
  def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
    """Maps a v8 trunk revision to its bleeding_edge counterpart, when the
    mapping is unambiguous.

    Args:
      revision: A v8 trunk git hash.

    Returns:
      The matching bleeding_edge git hash, or None if the revision does not
      fit the expected "push to trunk" pattern.
    """
    svn_revision = self.source_control.SVNFindRev(revision)

    # NOTE(review): SVNFindRev may return None, and IsStringInt only catches
    # ValueError — int(None) raises TypeError. Confirm None cannot reach here.
    if IsStringInt(svn_revision):
      # V8 is tricky to bisect, in that there are only a few instances when
      # we can dive into bleeding_edge and get back a meaningful result.
      # Try to detect a V8 "business as usual" case, which is when:
      # 1. trunk revision N has description "Version X.Y.Z"
      # 2. bleeding_edge revision (N-1) has description "Prepare push to
      #    trunk. Now working on X.Y.(Z+1)."
      v8_dir = self._GetDepotDirectory('v8')
      v8_bleeding_edge_dir = self._GetDepotDirectory('v8_bleeding_edge')

      revision_info = self.source_control.QueryRevisionInfo(revision,
          cwd=v8_dir)

      version_re = re.compile("Version (?P<values>[0-9,.]+)")

      regex_results = version_re.search(revision_info['subject'])

      if regex_results:
        # NOTE(review): |version| is captured but never used below.
        version = regex_results.group('values')

        # Candidate: the bleeding_edge commit just before this trunk push.
        git_revision = self.source_control.ResolveToRevision(
            int(svn_revision) - 1, 'v8_bleeding_edge', -1,
            cwd=v8_bleeding_edge_dir)

        if git_revision:
          revision_info = self.source_control.QueryRevisionInfo(git_revision,
              cwd=v8_bleeding_edge_dir)

          if 'Prepare push to trunk' in revision_info['subject']:
            return git_revision
    return None
998 def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
999 cwd = self._GetDepotDirectory('v8')
1000 cmd = ['log', '--format=%ct', '-1', revision]
1001 output = CheckRunGit(cmd, cwd=cwd)
1002 commit_time = int(output)
1003 commits = []
1005 if search_forward:
1006 cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
1007 'origin/master']
1008 output = CheckRunGit(cmd, cwd=cwd)
1009 output = output.split()
1010 commits = output
1011 commits = reversed(commits)
1012 else:
1013 cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
1014 'origin/master']
1015 output = CheckRunGit(cmd, cwd=cwd)
1016 output = output.split()
1017 commits = output
1019 bleeding_edge_revision = None
1021 for c in commits:
1022 bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)
1023 if bleeding_edge_revision:
1024 break
1026 return bleeding_edge_revision
  def Get3rdPartyRevisionsFromCurrentRevision(self, depot, revision):
    """Parses the DEPS file to determine WebKit/v8/etc... versions.

    Returns:
      A dict in the format {depot:revision} if successful, otherwise None.
    """

    cwd = os.getcwd()
    self.ChangeToDepotWorkingDirectory(depot)

    results = {}

    if depot == 'chromium' or depot == 'android-chrome':
      # Evaluate .DEPS.git with stub Var/From helpers to recover the 'deps'
      # dict. NOTE(review): 'locals' shadows the builtin, and execfile runs
      # the DEPS file's code directly.
      locals = {'Var': lambda _: locals["vars"][_],
                'From': lambda *args: None}
      execfile(bisect_utils.FILE_DEPS_GIT, {}, locals)

      os.chdir(cwd)

      # Pinned revisions appear in DEPS entries as '...git@<sha1>'.
      rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")

      for d in DEPOT_NAMES:
        if DEPOT_DEPS_NAME[d].has_key('platform'):
          if DEPOT_DEPS_NAME[d]['platform'] != os.name:
            continue

        if (DEPOT_DEPS_NAME[d]['recurse'] and
            depot in DEPOT_DEPS_NAME[d]['from']):
          # NOTE(review): DEPOT_DEPS_NAME[d]['src_old'] raises KeyError for
          # depots without an 'src_old' entry when the 'src' lookup misses —
          # confirm every recursed depot's 'src' is present in deps.
          if (locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src']) or
              locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src_old'])):
            if locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src']):
              re_results = rxp.search(locals['deps'][DEPOT_DEPS_NAME[d]['src']])
              self.depot_cwd[d] =\
                  os.path.join(self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
            elif locals['deps'].has_key(DEPOT_DEPS_NAME[d]['src_old']):
              re_results =\
                  rxp.search(locals['deps'][DEPOT_DEPS_NAME[d]['src_old']])
              self.depot_cwd[d] =\
                  os.path.join(self.src_cwd, DEPOT_DEPS_NAME[d]['src_old'][4:])

            if re_results:
              results[d] = re_results.group('revision')
            else:
              print 'Couldn\'t parse revision for %s.' % d
              print
              return None
          else:
            print 'Couldn\'t find %s while parsing .DEPS.git.' % d
            print
            return None
    elif depot == 'cros':
      # Query portage for the currently visible Chrome ebuild version.
      cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board,
          'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild',
          CROS_CHROMEOS_PATTERN]
      (output, return_code) = RunProcessAndRetrieveOutput(cmd)

      assert not return_code, 'An error occurred while running'\
                              ' "%s"' % ' '.join(cmd)

      # NOTE(review): this compares an int to a str; under Python 2 an int
      # never compares greater than a str, so the branch never executes —
      # len(CROS_CHROMEOS_PATTERN) was probably intended.
      if len(output) > CROS_CHROMEOS_PATTERN:
        output = output[len(CROS_CHROMEOS_PATTERN):]

      if len(output) > 1:
        output = output.split('_')[0]

        if len(output) > 3:
          contents = output.split('.')

          version = contents[2]

          if contents[3] != '0':
            warningText = 'Chrome version: %s.%s but using %s.0 to bisect.' %\
                (version, contents[3], version)
            if not warningText in self.warnings:
              self.warnings.append(warningText)

          cwd = os.getcwd()
          self.ChangeToDepotWorkingDirectory('chromium')
          # NOTE(review): the git hash returned here is assigned to
          # |return_code| and never used; 'chromium' is filled from |output|
          # below instead — confirm which value was intended.
          return_code = CheckRunGit(['log', '-1', '--format=%H',
              '--author=chrome-release@google.com', '--grep=to %s' % version,
              'origin/master'])
          os.chdir(cwd)

          results['chromium'] = output.strip()
    elif depot == 'v8':
      # We can't try to map the trunk revision to bleeding edge yet, because
      # we don't know which direction to try to search in. Have to wait until
      # the bisect has narrowed the results down to 2 v8 rolls.
      results['v8_bleeding_edge'] = None

    return results
1120 def BuildCurrentRevision(self, depot):
1121 """Builds chrome and performance_ui_tests on the current revision.
1123 Returns:
1124 True if the build was successful.
1126 if self.opts.debug_ignore_build:
1127 return True
1129 cwd = os.getcwd()
1130 os.chdir(self.src_cwd)
1132 build_success = self.builder.Build(depot, self.opts)
1134 os.chdir(cwd)
1136 return build_success
1138 def RunGClientHooks(self):
1139 """Runs gclient with runhooks command.
1141 Returns:
1142 True if gclient reports no errors.
1145 if self.opts.debug_ignore_build:
1146 return True
1148 return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
1150 def TryParseHistogramValuesFromOutput(self, metric, text):
1151 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>.
1153 Args:
1154 metric: The metric as a list of [<trace>, <value>] strings.
1155 text: The text to parse the metric values from.
1157 Returns:
1158 A list of floating point numbers found.
1160 metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])
1162 text_lines = text.split('\n')
1163 values_list = []
1165 for current_line in text_lines:
1166 if metric_formatted in current_line:
1167 current_line = current_line[len(metric_formatted):]
1169 try:
1170 histogram_values = eval(current_line)
1172 for b in histogram_values['buckets']:
1173 average_for_bucket = float(b['high'] + b['low']) * 0.5
1174 # Extends the list with N-elements with the average for that bucket.
1175 values_list.extend([average_for_bucket] * b['count'])
1176 except:
1177 pass
1179 return values_list
1181 def TryParseResultValuesFromOutput(self, metric, text):
1182 """Attempts to parse a metric in the format RESULT <graph: <trace>.
1184 Args:
1185 metric: The metric as a list of [<trace>, <value>] strings.
1186 text: The text to parse the metric values from.
1188 Returns:
1189 A list of floating point numbers found.
1191 # Format is: RESULT <graph>: <trace>= <value> <units>
1192 metric_formatted = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))
1194 text_lines = text.split('\n')
1195 values_list = []
1197 for current_line in text_lines:
1198 # Parse the output from the performance test for the metric we're
1199 # interested in.
1200 metric_re = metric_formatted +\
1201 "(\s)*(?P<values>[0-9]+(\.[0-9]*)?)"
1202 metric_re = re.compile(metric_re)
1203 regex_results = metric_re.search(current_line)
1205 if not regex_results is None:
1206 values_list += [regex_results.group('values')]
1207 else:
1208 metric_re = metric_formatted +\
1209 "(\s)*\[(\s)*(?P<values>[0-9,.]+)\]"
1210 metric_re = re.compile(metric_re)
1211 regex_results = metric_re.search(current_line)
1213 if not regex_results is None:
1214 metric_values = regex_results.group('values')
1216 values_list += metric_values.split(',')
1218 values_list = [float(v) for v in values_list if IsStringFloat(v)]
1220 # If the metric is times/t, we need to sum the timings in order to get
1221 # similar regression results as the try-bots.
1222 metrics_to_sum = [['times', 't'], ['times', 'page_load_time'],
1223 ['cold_times', 'page_load_time'], ['warm_times', 'page_load_time']]
1225 if metric in metrics_to_sum:
1226 if values_list:
1227 values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]
1229 return values_list
1231 def ParseMetricValuesFromOutput(self, metric, text):
1232 """Parses output from performance_ui_tests and retrieves the results for
1233 a given metric.
1235 Args:
1236 metric: The metric as a list of [<trace>, <value>] strings.
1237 text: The text to parse the metric values from.
1239 Returns:
1240 A list of floating point numbers found.
1242 metric_values = self.TryParseResultValuesFromOutput(metric, text)
1244 if not metric_values:
1245 metric_values = self.TryParseHistogramValuesFromOutput(metric, text)
1247 return metric_values
1249 def _GenerateProfileIfNecessary(self, command_args):
1250 """Checks the command line of the performance test for dependencies on
1251 profile generation, and runs tools/perf/generate_profile as necessary.
1253 Args:
1254 command_args: Command line being passed to performance test, as a list.
1256 Returns:
1257 False if profile generation was necessary and failed, otherwise True.
1260 if '--profile-dir' in ' '.join(command_args):
1261 # If we were using python 2.7+, we could just use the argparse
1262 # module's parse_known_args to grab --profile-dir. Since some of the
1263 # bots still run 2.6, have to grab the arguments manually.
1264 arg_dict = {}
1265 args_to_parse = ['--profile-dir', '--browser']
1267 for arg_to_parse in args_to_parse:
1268 for i, current_arg in enumerate(command_args):
1269 if arg_to_parse in current_arg:
1270 current_arg_split = current_arg.split('=')
1272 # Check 2 cases, --arg=<val> and --arg <val>
1273 if len(current_arg_split) == 2:
1274 arg_dict[arg_to_parse] = current_arg_split[1]
1275 elif i + 1 < len(command_args):
1276 arg_dict[arg_to_parse] = command_args[i+1]
1278 path_to_generate = os.path.join('tools', 'perf', 'generate_profile')
1280 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'):
1281 profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
1282 return not RunProcess(['python', path_to_generate,
1283 '--profile-type-to-generate', profile_type,
1284 '--browser', arg_dict['--browser'], '--output-dir', profile_path])
1285 return False
1286 return True
  def RunPerformanceTestAndParseResults(self, command_to_run, metric,
      reset_on_first_run=False, upload_on_last_run=False, results_label=None):
    """Runs a performance test on the current revision by executing the
    'command_to_run' and parses the results.

    Args:
      command_to_run: The command to be run to execute the performance test.
      metric: The metric to parse out from the results of the performance test.
      reset_on_first_run: If telemetry, pass --reset-results on the first run.
      upload_on_last_run: If telemetry, pass --upload-results on the last run.
      results_label: If telemetry, pass this as --results-label.

    Returns:
      On success, it will return a tuple of the average value of the metric,
      and a success code of 0.
      NOTE(review): failure paths return 2-tuples while the success/no-values
      paths below return 3-tuples that also carry the combined test output —
      callers appear to only index [0] and [1].
    """
    if self.opts.debug_ignore_perf_test:
      return ({'mean': 0.0, 'std_err': 0.0, 'std_dev': 0.0, 'values': [0.0]}, 0)

    if IsWindows():
      command_to_run = command_to_run.replace('/', r'\\')

    args = shlex.split(command_to_run)

    if not self._GenerateProfileIfNecessary(args):
      return ('Failed to generate profile for performance test.', -1)

    # If running a telemetry test for cros, insert the remote ip, and
    # identity parameters.
    is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
    if self.opts.target_platform == 'cros' and is_telemetry:
      args.append('--remote=%s' % self.opts.cros_remote_ip)
      args.append('--identity=%s' % CROS_TEST_KEY_PATH)

    # Tests are executed from the chromium source directory.
    cwd = os.getcwd()
    os.chdir(self.src_cwd)

    start_time = time.time()

    metric_values = []
    output_of_all_runs = ''
    for i in xrange(self.opts.repeat_test_count):
      # Can ignore the return code since if the tests fail, it won't return 0.
      try:
        current_args = copy.copy(args)
        if is_telemetry:
          if i == 0 and reset_on_first_run:
            current_args.append('--reset-results')
          elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
            current_args.append('--upload-results')
          if results_label:
            current_args.append('--results-label=%s' % results_label)
        (output, return_code) = RunProcessAndRetrieveOutput(current_args)
      except OSError, e:
        # ENOENT: the test binary itself could not be found; give the user a
        # hint about the most common cause (a src/-relative path).
        if e.errno == errno.ENOENT:
          err_text = ("Something went wrong running the performance test. "
              "Please review the command line:\n\n")
          if 'src/' in ' '.join(args):
            err_text += ("Check that you haven't accidentally specified a path "
                "with src/ in the command.\n\n")
          err_text += ' '.join(args)
          err_text += '\n'

          return (err_text, -1)
        raise

      output_of_all_runs += output
      if self.opts.output_buildbot_annotations:
        print output

      metric_values += self.ParseMetricValuesFromOutput(metric, output)

      elapsed_minutes = (time.time() - start_time) / 60.0

      # Stop early when over the time budget, or when the first run produced
      # no values at all (repeating would not help).
      if elapsed_minutes >= self.opts.max_time_minutes or not metric_values:
        break

    os.chdir(cwd)

    # Need to get the average value if there were multiple values.
    if metric_values:
      truncated_mean = CalculateTruncatedMean(metric_values,
          self.opts.truncate_percent)
      standard_err = CalculateStandardError(metric_values)
      standard_dev = CalculateStandardDeviation(metric_values)

      values = {
        'mean': truncated_mean,
        'std_err': standard_err,
        'std_dev': standard_dev,
        'values': metric_values,
      }

      print 'Results of performance test: %12f %12f' % (
          truncated_mean, standard_err)
      print
      return (values, 0, output_of_all_runs)
    else:
      return ('Invalid metric specified, or no values returned from '
          'performance test.', -1, output_of_all_runs)
1387 def FindAllRevisionsToSync(self, revision, depot):
1388 """Finds all dependant revisions and depots that need to be synced for a
1389 given revision. This is only useful in the git workflow, as an svn depot
1390 may be split into multiple mirrors.
1392 ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
1393 skia/include. To sync skia/src properly, one has to find the proper
1394 revisions in skia/gyp and skia/include.
1396 Args:
1397 revision: The revision to sync to.
1398 depot: The depot in use at the moment (probably skia).
1400 Returns:
1401 A list of [depot, revision] pairs that need to be synced.
1403 revisions_to_sync = [[depot, revision]]
1405 is_base = ((depot == 'chromium') or (depot == 'cros') or
1406 (depot == 'android-chrome'))
1408 # Some SVN depots were split into multiple git depots, so we need to
1409 # figure out for each mirror which git revision to grab. There's no
1410 # guarantee that the SVN revision will exist for each of the dependant
1411 # depots, so we have to grep the git logs and grab the next earlier one.
1412 if not is_base and\
1413 DEPOT_DEPS_NAME[depot]['depends'] and\
1414 self.source_control.IsGit():
1415 svn_rev = self.source_control.SVNFindRev(revision)
1417 for d in DEPOT_DEPS_NAME[depot]['depends']:
1418 self.ChangeToDepotWorkingDirectory(d)
1420 dependant_rev = self.source_control.ResolveToRevision(svn_rev, d, -1000)
1422 if dependant_rev:
1423 revisions_to_sync.append([d, dependant_rev])
1425 num_resolved = len(revisions_to_sync)
1426 num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])
1428 self.ChangeToDepotWorkingDirectory(depot)
1430 if not ((num_resolved - 1) == num_needed):
1431 return None
1433 return revisions_to_sync
1435 def PerformPreBuildCleanup(self):
1436 """Performs necessary cleanup between runs."""
1437 print 'Cleaning up between runs.'
1438 print
1440 # Having these pyc files around between runs can confuse the
1441 # perf tests and cause them to crash.
1442 for (path, dir, files) in os.walk(self.src_cwd):
1443 for cur_file in files:
1444 if cur_file.endswith('.pyc'):
1445 path_to_file = os.path.join(path, cur_file)
1446 os.remove(path_to_file)
1448 def PerformWebkitDirectoryCleanup(self, revision):
1449 """If the script is switching between Blink and WebKit during bisect,
1450 its faster to just delete the directory rather than leave it up to git
1451 to sync.
1453 Returns:
1454 True if successful.
1456 if not self.source_control.CheckoutFileAtRevision(
1457 bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
1458 return False
1460 cwd = os.getcwd()
1461 os.chdir(self.src_cwd)
1463 is_blink = bisect_utils.IsDepsFileBlink()
1465 os.chdir(cwd)
1467 if not self.source_control.RevertFileToHead(
1468 bisect_utils.FILE_DEPS_GIT):
1469 return False
1471 if self.was_blink != is_blink:
1472 self.was_blink = is_blink
1473 return bisect_utils.RemoveThirdPartyWebkitDirectory()
1474 return True
1476 def PerformCrosChrootCleanup(self):
1477 """Deletes the chroot.
1479 Returns:
1480 True if successful.
1482 cwd = os.getcwd()
1483 self.ChangeToDepotWorkingDirectory('cros')
1484 cmd = [CROS_SDK_PATH, '--delete']
1485 return_code = RunProcess(cmd)
1486 os.chdir(cwd)
1487 return not return_code
1489 def CreateCrosChroot(self):
1490 """Creates a new chroot.
1492 Returns:
1493 True if successful.
1495 cwd = os.getcwd()
1496 self.ChangeToDepotWorkingDirectory('cros')
1497 cmd = [CROS_SDK_PATH, '--create']
1498 return_code = RunProcess(cmd)
1499 os.chdir(cwd)
1500 return not return_code
1502 def PerformPreSyncCleanup(self, revision, depot):
1503 """Performs any necessary cleanup before syncing.
1505 Returns:
1506 True if successful.
1508 if depot == 'chromium':
1509 if not bisect_utils.RemoveThirdPartyLibjingleDirectory():
1510 return False
1511 return self.PerformWebkitDirectoryCleanup(revision)
1512 elif depot == 'cros':
1513 return self.PerformCrosChrootCleanup()
1514 return True
  def RunPostSync(self, depot):
    """Performs any work after syncing.

    Returns:
      True if successful.
    """
    # Android needs its build environment re-applied after every sync.
    if self.opts.target_platform == 'android':
      if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
          path_to_src=self.src_cwd):
        return False

    if depot == 'cros':
      # cros builds inside a chroot; (re)create it instead of running hooks.
      return self.CreateCrosChroot()
    else:
      return self.RunGClientHooks()
    return True  # NOTE(review): appears unreachable — both branches return.
1533 def ShouldSkipRevision(self, depot, revision):
1534 """Some commits can be safely skipped (such as a DEPS roll), since the tool
1535 is git based those changes would have no effect.
1537 Args:
1538 depot: The depot being bisected.
1539 revision: Current revision we're synced to.
1541 Returns:
1542 True if we should skip building/testing this revision.
1544 if depot == 'chromium':
1545 if self.source_control.IsGit():
1546 cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
1547 output = CheckRunGit(cmd)
1549 files = output.splitlines()
1551 if len(files) == 1 and files[0] == 'DEPS':
1552 return True
1554 return False
  def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
      skippable=False):
    """Performs a full sync/build/run of the specified revision.

    Args:
      revision: The revision to sync to.
      depot: The depot that's being used at the moment (src, webkit, etc.)
      command_to_run: The command to execute the performance test.
      metric: The performance metric being tested.
      skippable: If True, a no-op revision (e.g. a DEPS roll) may be skipped.

    Returns:
      On success, a tuple containing the results of the performance test.
      Otherwise, a tuple with the error message.
    """
    # Pick the sync tool for the depot; plain git depots get sync_client=None.
    sync_client = None
    if depot == 'chromium' or depot == 'android-chrome':
      sync_client = 'gclient'
    elif depot == 'cros':
      sync_client = 'repo'

    revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)

    if not revisions_to_sync:
      return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)

    if not self.PerformPreSyncCleanup(revision, depot):
      return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)

    success = True

    if not self.opts.debug_ignore_sync:
      # r is a [depot, revision] pair.
      for r in revisions_to_sync:
        self.ChangeToDepotWorkingDirectory(r[0])

        if sync_client:
          self.PerformPreBuildCleanup()

        # If you're using gclient to sync, you need to specify the depot you
        # want so that all the dependencies sync properly as well.
        # ie. gclient sync src@<SHA1>
        current_revision = r[1]
        if sync_client == 'gclient':
          current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
              current_revision)
        if not self.source_control.SyncToRevision(current_revision,
            sync_client):
          success = False

          break

    if success:
      success = self.RunPostSync(depot)

      if success:
        if skippable and self.ShouldSkipRevision(depot, revision):
          return ('Skipped revision: [%s]' % str(revision),
              BUILD_RESULT_SKIPPED)

        # Build and test, recording build time and test time separately so
        # the report can show both.
        start_build_time = time.time()
        if self.BuildCurrentRevision(depot):
          after_build_time = time.time()
          results = self.RunPerformanceTestAndParseResults(command_to_run,
              metric)

          if results[1] == 0:
            external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision(
                depot, revision)

            if not external_revisions is None:
              # 5-tuple: (values, status, externals, perf_time, build_time).
              return (results[0], results[1], external_revisions,
                  time.time() - after_build_time, after_build_time -
                  start_build_time)
            else:
              return ('Failed to parse DEPS file for external revisions.',
                  BUILD_RESULT_FAIL)
          else:
            return results
        else:
          return ('Failed to build revision: [%s]' % (str(revision, )),
              BUILD_RESULT_FAIL)
      else:
        return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
    else:
      return ('Failed to sync revision: [%s]' % (str(revision, )),
          BUILD_RESULT_FAIL)
1642 def CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
1643 """Given known good and bad values, decide if the current_value passed
1644 or failed.
1646 Args:
1647 current_value: The value of the metric being checked.
1648 known_bad_value: The reference value for a "failed" run.
1649 known_good_value: The reference value for a "passed" run.
1651 Returns:
1652 True if the current_value is closer to the known_good_value than the
1653 known_bad_value.
1655 dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
1656 dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])
1658 return dist_to_good_value < dist_to_bad_value
1660 def _GetDepotDirectory(self, depot_name):
1661 if depot_name == 'chromium':
1662 return self.src_cwd
1663 elif depot_name == 'cros':
1664 return self.cros_cwd
1665 elif depot_name in DEPOT_NAMES:
1666 return self.depot_cwd[depot_name]
1667 else:
1668 assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
1669 ' was added without proper support?' %\
1670 (depot_name,)
1672 def ChangeToDepotWorkingDirectory(self, depot_name):
1673 """Given a depot, changes to the appropriate working directory.
1675 Args:
1676 depot_name: The name of the depot (see DEPOT_NAMES).
1678 os.chdir(self._GetDepotDirectory(depot_name))
1680 def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
1681 r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'],
1682 search_forward=True)
1683 r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'],
1684 search_forward=False)
1685 min_revision_data['external']['v8_bleeding_edge'] = r1
1686 max_revision_data['external']['v8_bleeding_edge'] = r2
1688 if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
1689 min_revision_data['revision']) or
1690 not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
1691 max_revision_data['revision'])):
1692 self.warnings.append('Trunk revisions in V8 did not map directly to '
1693 'bleeding_edge. Attempted to expand the range to find V8 rolls which '
1694 'did map directly to bleeding_edge revisions, but results might not '
1695 'be valid.')
1697 def _FindNextDepotToBisect(self, current_depot, current_revision,
1698 min_revision_data, max_revision_data):
1699 """Given the state of the bisect, decides which depot the script should
1700 dive into next (if any).
1702 Args:
1703 current_depot: Current depot being bisected.
1704 current_revision: Current revision synced to.
1705 min_revision_data: Data about the earliest revision in the bisect range.
1706 max_revision_data: Data about the latest revision in the bisect range.
1708 Returns:
1709 The depot to bisect next, or None.
1711 external_depot = None
1712 for next_depot in DEPOT_NAMES:
1713 if DEPOT_DEPS_NAME[next_depot].has_key('platform'):
1714 if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
1715 continue
1717 if not (DEPOT_DEPS_NAME[next_depot]["recurse"] and
1718 min_revision_data['depot'] in DEPOT_DEPS_NAME[next_depot]['from']):
1719 continue
1721 if current_depot == 'v8':
1722 # We grab the bleeding_edge info here rather than earlier because we
1723 # finally have the revision range. From that we can search forwards and
1724 # backwards to try to match trunk revisions to bleeding_edge.
1725 self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)
1727 if (min_revision_data['external'][next_depot] ==
1728 max_revision_data['external'][next_depot]):
1729 continue
1731 if (min_revision_data['external'][next_depot] and
1732 max_revision_data['external'][next_depot]):
1733 external_depot = next_depot
1734 break
1736 return external_depot
  def PrepareToBisectOnDepot(self,
                             current_depot,
                             end_revision,
                             start_revision,
                             previous_depot,
                             previous_revision):
    """Changes to the appropriate directory and gathers a list of revisions
    to bisect between |start_revision| and |end_revision|.

    Args:
      current_depot: The depot we want to bisect.
      end_revision: End of the revision range.
      start_revision: Start of the revision range.
      previous_depot: The depot we were previously bisecting.
      previous_revision: The last revision we synced to on |previous_depot|.

    Returns:
      A list containing the revisions between |start_revision| and
      |end_revision| inclusive.
    """
    # Change into working directory of external library to run
    # subsequent commands.
    self.ChangeToDepotWorkingDirectory(current_depot)

    # V8 (and possibly others) is merged in periodically. Bisecting
    # this directory directly won't give much good info.
    if DEPOT_DEPS_NAME[current_depot].has_key('custom_deps'):
      # Rewrite the gclient config with the depot's custom_deps and re-sync;
      # an empty list signals failure to the caller.
      config_path = os.path.join(self.src_cwd, '..')
      if bisect_utils.RunGClientAndCreateConfig(self.opts,
          DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
        return []
      if bisect_utils.RunGClient(
          ['sync', '--revision', previous_revision], cwd=self.src_cwd):
        return []

    if current_depot == 'v8_bleeding_edge':
      self.ChangeToDepotWorkingDirectory('chromium')

      # Swap the v8 and v8_bleeding_edge checkouts on disk so the build picks
      # up bleeding_edge, and record shell commands that undo the swap during
      # cleanup.
      shutil.move('v8', 'v8.bak')
      shutil.move('v8_bleeding_edge', 'v8')

      self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
      self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])

      # Point the depot working directories at the swapped locations.
      self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
      self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')

      self.ChangeToDepotWorkingDirectory(current_depot)

    depot_revision_list = self.GetRevisionList(current_depot,
                                               end_revision,
                                               start_revision)

    self.ChangeToDepotWorkingDirectory('chromium')

    return depot_revision_list
1795 def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
1796 """Gathers reference values by running the performance tests on the
1797 known good and bad revisions.
1799 Args:
1800 good_rev: The last known good revision where the performance regression
1801 has not occurred yet.
1802 bad_rev: A revision where the performance regression has already occurred.
1803 cmd: The command to execute the performance test.
1804 metric: The metric being tested for regression.
1806 Returns:
1807 A tuple with the results of building and running each revision.
1809 bad_run_results = self.SyncBuildAndRunRevision(bad_rev,
1810 target_depot,
1811 cmd,
1812 metric)
1814 good_run_results = None
1816 if not bad_run_results[1]:
1817 good_run_results = self.SyncBuildAndRunRevision(good_rev,
1818 target_depot,
1819 cmd,
1820 metric)
1822 return (bad_run_results, good_run_results)
1824 def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
1825 """Adds new revisions to the revision_data dict and initializes them.
1827 Args:
1828 revisions: List of revisions to add.
1829 depot: Depot that's currently in use (src, webkit, etc...)
1830 sort: Sorting key for displaying revisions.
1831 revision_data: A dict to add the new revisions into. Existing revisions
1832 will have their sort keys offset.
1835 num_depot_revisions = len(revisions)
1837 for k, v in revision_data.iteritems():
1838 if v['sort'] > sort:
1839 v['sort'] += num_depot_revisions
1841 for i in xrange(num_depot_revisions):
1842 r = revisions[i]
1844 revision_data[r] = {'revision' : r,
1845 'depot' : depot,
1846 'value' : None,
1847 'perf_time' : 0,
1848 'build_time' : 0,
1849 'passed' : '?',
1850 'sort' : i + sort + 1}
1852 def PrintRevisionsToBisectMessage(self, revision_list, depot):
1853 if self.opts.output_buildbot_annotations:
1854 step_name = 'Bisection Range: [%s - %s]' % (
1855 revision_list[len(revision_list)-1], revision_list[0])
1856 bisect_utils.OutputAnnotationStepStart(step_name)
1858 print
1859 print 'Revisions to bisect on [%s]:' % depot
1860 for revision_id in revision_list:
1861 print ' -> %s' % (revision_id, )
1862 print
1864 if self.opts.output_buildbot_annotations:
1865 bisect_utils.OutputAnnotationStepClosed()
  def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
    """Checks to see if changes to DEPS file occurred, and that the revision
    range also includes the change to .DEPS.git. If it doesn't, attempts to
    expand the revision range to include it.

    Args:
      bad_revision: First known bad revision.
      good_revision: Last known good revision.

    Returns:
      A tuple with the new bad and good revisions.
    """
    if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
      changes_to_deps = self.source_control.QueryFileRevisionHistory(
          'DEPS', good_revision, bad_revision)

      if changes_to_deps:
        # DEPS file was changed, search from the oldest change to DEPS file to
        # bad_revision to see if there are matching .DEPS.git changes.
        oldest_deps_change = changes_to_deps[-1]
        changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
            bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)

        # Mismatched counts mean a DEPS change whose .DEPS.git counterpart
        # falls outside the range.
        if len(changes_to_deps) != len(changes_to_gitdeps):
          # Grab the timestamp of the last DEPS change
          cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
          output = CheckRunGit(cmd)
          commit_time = int(output)

          # Try looking for a commit that touches the .DEPS.git file in the
          # next 15 minutes after the DEPS file change.
          cmd = ['log', '--format=%H', '-1',
              '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
              'origin/master', bisect_utils.FILE_DEPS_GIT]
          output = CheckRunGit(cmd)
          output = output.strip()
          if output:
            self.warnings.append('Detected change to DEPS and modified '
                'revision range to include change to .DEPS.git')
            # Expand the bad end of the range to the .DEPS.git commit.
            return (output, good_revision)
          else:
            self.warnings.append('Detected change to DEPS but couldn\'t find '
                'matching change to .DEPS.git')
    return (bad_revision, good_revision)
1912 def CheckIfRevisionsInProperOrder(self,
1913 target_depot,
1914 good_revision,
1915 bad_revision):
1916 """Checks that |good_revision| is an earlier revision than |bad_revision|.
1918 Args:
1919 good_revision: Number/tag of the known good revision.
1920 bad_revision: Number/tag of the known bad revision.
1922 Returns:
1923 True if the revisions are in the proper order (good earlier than bad).
1925 if self.source_control.IsGit() and target_depot != 'cros':
1926 cmd = ['log', '--format=%ct', '-1', good_revision]
1927 cwd = self._GetDepotDirectory(target_depot)
1929 output = CheckRunGit(cmd, cwd=cwd)
1930 good_commit_time = int(output)
1932 cmd = ['log', '--format=%ct', '-1', bad_revision]
1933 output = CheckRunGit(cmd, cwd=cwd)
1934 bad_commit_time = int(output)
1936 return good_commit_time <= bad_commit_time
1937 else:
1938 # Cros/svn use integers
1939 return int(good_revision) <= int(bad_revision)
1941 def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
1942 """Given known good and bad revisions, run a binary search on all
1943 intermediate revisions to determine the CL where the performance regression
1944 occurred.
1946 Args:
1947 command_to_run: Specify the command to execute the performance test.
1948 good_revision: Number/tag of the known good revision.
1949 bad_revision: Number/tag of the known bad revision.
1950 metric: The performance metric to monitor.
1952 Returns:
1953 A dict with 2 members, 'revision_data' and 'error'. On success,
1954 'revision_data' will contain a dict mapping revision ids to
1955 data about that revision. Each piece of revision data consists of a
1956 dict with the following keys:
1958 'passed': Represents whether the performance test was successful at
1959 that revision. Possible values include: 1 (passed), 0 (failed),
1960 '?' (skipped), 'F' (build failed).
1961 'depot': The depot that this revision is from (ie. WebKit)
1962 'external': If the revision is a 'src' revision, 'external' contains
1963 the revisions of each of the external libraries.
1964 'sort': A sort value for sorting the dict in order of commits.
1966 For example:
1968 'error':None,
1969 'revision_data':
1971 'CL #1':
1973 'passed':False,
1974 'depot':'chromium',
1975 'external':None,
1976 'sort':0
1981 If an error occurred, the 'error' field will contain the message and
1982 'revision_data' will be empty.
1985 results = {'revision_data' : {},
1986 'error' : None}
1988 # Choose depot to bisect first
1989 target_depot = 'chromium'
1990 if self.opts.target_platform == 'cros':
1991 target_depot = 'cros'
1992 elif self.opts.target_platform == 'android-chrome':
1993 target_depot = 'android-chrome'
1995 cwd = os.getcwd()
1996 self.ChangeToDepotWorkingDirectory(target_depot)
1998 # If they passed SVN CL's, etc... we can try match them to git SHA1's.
1999 bad_revision = self.source_control.ResolveToRevision(bad_revision_in,
2000 target_depot, 100)
2001 good_revision = self.source_control.ResolveToRevision(good_revision_in,
2002 target_depot, -100)
2004 os.chdir(cwd)
2007 if bad_revision is None:
2008 results['error'] = 'Could\'t resolve [%s] to SHA1.' % (bad_revision_in,)
2009 return results
2011 if good_revision is None:
2012 results['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in,)
2013 return results
2015 # Check that they didn't accidentally swap good and bad revisions.
2016 if not self.CheckIfRevisionsInProperOrder(
2017 target_depot, good_revision, bad_revision):
2018 results['error'] = 'bad_revision < good_revision, did you swap these '\
2019 'by mistake?'
2020 return results
2022 (bad_revision, good_revision) = self.NudgeRevisionsIfDEPSChange(
2023 bad_revision, good_revision)
2025 if self.opts.output_buildbot_annotations:
2026 bisect_utils.OutputAnnotationStepStart('Gathering Revisions')
2028 print 'Gathering revision range for bisection.'
2030 # Retrieve a list of revisions to do bisection on.
2031 src_revision_list = self.GetRevisionList(target_depot,
2032 bad_revision,
2033 good_revision)
2035 if self.opts.output_buildbot_annotations:
2036 bisect_utils.OutputAnnotationStepClosed()
2038 if src_revision_list:
2039 # revision_data will store information about a revision such as the
2040 # depot it came from, the webkit/V8 revision at that time,
2041 # performance timing, build state, etc...
2042 revision_data = results['revision_data']
2044 # revision_list is the list we're binary searching through at the moment.
2045 revision_list = []
2047 sort_key_ids = 0
2049 for current_revision_id in src_revision_list:
2050 sort_key_ids += 1
2052 revision_data[current_revision_id] = {'value' : None,
2053 'passed' : '?',
2054 'depot' : target_depot,
2055 'external' : None,
2056 'perf_time' : 0,
2057 'build_time' : 0,
2058 'sort' : sort_key_ids}
2059 revision_list.append(current_revision_id)
2061 min_revision = 0
2062 max_revision = len(revision_list) - 1
2064 self.PrintRevisionsToBisectMessage(revision_list, target_depot)
2066 if self.opts.output_buildbot_annotations:
2067 bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')
2069 print 'Gathering reference values for bisection.'
2071 # Perform the performance tests on the good and bad revisions, to get
2072 # reference values.
2073 (bad_results, good_results) = self.GatherReferenceValues(good_revision,
2074 bad_revision,
2075 command_to_run,
2076 metric,
2077 target_depot)
2079 if self.opts.output_buildbot_annotations:
2080 bisect_utils.OutputAnnotationStepClosed()
2082 if bad_results[1]:
2083 results['error'] = ('An error occurred while building and running '
2084 'the \'bad\' reference value. The bisect cannot continue without '
2085 'a working \'bad\' revision to start from.\n\nError: %s' %
2086 bad_results[0])
2087 return results
2089 if good_results[1]:
2090 results['error'] = ('An error occurred while building and running '
2091 'the \'good\' reference value. The bisect cannot continue without '
2092 'a working \'good\' revision to start from.\n\nError: %s' %
2093 good_results[0])
2094 return results
2097 # We need these reference values to determine if later runs should be
2098 # classified as pass or fail.
2099 known_bad_value = bad_results[0]
2100 known_good_value = good_results[0]
2102 # Can just mark the good and bad revisions explicitly here since we
2103 # already know the results.
2104 bad_revision_data = revision_data[revision_list[0]]
2105 bad_revision_data['external'] = bad_results[2]
2106 bad_revision_data['perf_time'] = bad_results[3]
2107 bad_revision_data['build_time'] = bad_results[4]
2108 bad_revision_data['passed'] = False
2109 bad_revision_data['value'] = known_bad_value
2111 good_revision_data = revision_data[revision_list[max_revision]]
2112 good_revision_data['external'] = good_results[2]
2113 good_revision_data['perf_time'] = good_results[3]
2114 good_revision_data['build_time'] = good_results[4]
2115 good_revision_data['passed'] = True
2116 good_revision_data['value'] = known_good_value
2118 next_revision_depot = target_depot
2120 while True:
2121 if not revision_list:
2122 break
2124 min_revision_data = revision_data[revision_list[min_revision]]
2125 max_revision_data = revision_data[revision_list[max_revision]]
2127 if max_revision - min_revision <= 1:
2128 current_depot = min_revision_data['depot']
2129 if min_revision_data['passed'] == '?':
2130 next_revision_index = min_revision
2131 elif max_revision_data['passed'] == '?':
2132 next_revision_index = max_revision
2133 elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
2134 previous_revision = revision_list[min_revision]
2135 # If there were changes to any of the external libraries we track,
2136 # should bisect the changes there as well.
2137 external_depot = self._FindNextDepotToBisect(current_depot,
2138 previous_revision, min_revision_data, max_revision_data)
2140 # If there was no change in any of the external depots, the search
2141 # is over.
2142 if not external_depot:
2143 if current_depot == 'v8':
2144 self.warnings.append('Unfortunately, V8 bisection couldn\'t '
2145 'continue any further. The script can only bisect into '
2146 'V8\'s bleeding_edge repository if both the current and '
2147 'previous revisions in trunk map directly to revisions in '
2148 'bleeding_edge.')
2149 break
2151 earliest_revision = max_revision_data['external'][external_depot]
2152 latest_revision = min_revision_data['external'][external_depot]
2154 new_revision_list = self.PrepareToBisectOnDepot(external_depot,
2155 latest_revision,
2156 earliest_revision,
2157 next_revision_depot,
2158 previous_revision)
2160 if not new_revision_list:
2161 results['error'] = 'An error occurred attempting to retrieve'\
2162 ' revision range: [%s..%s]' %\
2163 (earliest_revision, latest_revision)
2164 return results
2166 self.AddRevisionsIntoRevisionData(new_revision_list,
2167 external_depot,
2168 min_revision_data['sort'],
2169 revision_data)
2171 # Reset the bisection and perform it on the newly inserted
2172 # changelists.
2173 revision_list = new_revision_list
2174 min_revision = 0
2175 max_revision = len(revision_list) - 1
2176 sort_key_ids += len(revision_list)
2178 print 'Regression in metric:%s appears to be the result of changes'\
2179 ' in [%s].' % (metric, external_depot)
2181 self.PrintRevisionsToBisectMessage(revision_list, external_depot)
2183 continue
2184 else:
2185 break
2186 else:
2187 next_revision_index = int((max_revision - min_revision) / 2) +\
2188 min_revision
2190 next_revision_id = revision_list[next_revision_index]
2191 next_revision_data = revision_data[next_revision_id]
2192 next_revision_depot = next_revision_data['depot']
2194 self.ChangeToDepotWorkingDirectory(next_revision_depot)
2196 if self.opts.output_buildbot_annotations:
2197 step_name = 'Working on [%s]' % next_revision_id
2198 bisect_utils.OutputAnnotationStepStart(step_name)
2200 print 'Working on revision: [%s]' % next_revision_id
2202 run_results = self.SyncBuildAndRunRevision(next_revision_id,
2203 next_revision_depot,
2204 command_to_run,
2205 metric, skippable=True)
2207 # If the build is successful, check whether or not the metric
2208 # had regressed.
2209 if not run_results[1]:
2210 if len(run_results) > 2:
2211 next_revision_data['external'] = run_results[2]
2212 next_revision_data['perf_time'] = run_results[3]
2213 next_revision_data['build_time'] = run_results[4]
2215 passed_regression = self.CheckIfRunPassed(run_results[0],
2216 known_good_value,
2217 known_bad_value)
2219 next_revision_data['passed'] = passed_regression
2220 next_revision_data['value'] = run_results[0]
2222 if passed_regression:
2223 max_revision = next_revision_index
2224 else:
2225 min_revision = next_revision_index
2226 else:
2227 if run_results[1] == BUILD_RESULT_SKIPPED:
2228 next_revision_data['passed'] = 'Skipped'
2229 elif run_results[1] == BUILD_RESULT_FAIL:
2230 next_revision_data['passed'] = 'Build Failed'
2232 print run_results[0]
2234 # If the build is broken, remove it and redo search.
2235 revision_list.pop(next_revision_index)
2237 max_revision -= 1
2239 if self.opts.output_buildbot_annotations:
2240 self._PrintPartialResults(results)
2241 bisect_utils.OutputAnnotationStepClosed()
2242 else:
2243 # Weren't able to sync and retrieve the revision range.
2244 results['error'] = 'An error occurred attempting to retrieve revision '\
2245 'range: [%s..%s]' % (good_revision, bad_revision)
2247 return results
2249 def _PrintPartialResults(self, results_dict):
2250 revision_data = results_dict['revision_data']
2251 revision_data_sorted = sorted(revision_data.iteritems(),
2252 key = lambda x: x[1]['sort'])
2253 results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
2254 first_working_revision = results_dict['first_working_revision']
2255 last_broken_revision = results_dict['last_broken_revision']
2257 self._PrintTestedCommitsTable(revision_data_sorted,
2258 results_dict['first_working_revision'],
2259 results_dict['last_broken_revision'],
2260 100, final_step=False)
2262 def _PrintConfidence(self, results_dict):
2263 # The perf dashboard specifically looks for the string
2264 # "Confidence in Bisection Results: 100%" to decide whether or not
2265 # to cc the author(s). If you change this, please update the perf
2266 # dashboard as well.
2267 print 'Confidence in Bisection Results: %d%%' % results_dict['confidence']
2269 def _PrintBanner(self, results_dict):
2270 print
2271 print " __o_\___ Aw Snap! We hit a speed bump!"
2272 print "=-O----O-'__.~.___________________________________"
2273 print
2274 print 'Bisect reproduced a %.02f%% (+-%.02f%%) change in the %s metric.' % (
2275 results_dict['regression_size'], results_dict['regression_std_err'],
2276 '/'.join(self.opts.metric))
2277 self._PrintConfidence(results_dict)
2279 def _PrintFailedBanner(self, results_dict):
2280 print
2281 print ('Bisect could not reproduce a change in the '
2282 '%s/%s metric.' % (self.opts.metric[0], self.opts.metric[1]))
2283 print
2284 self._PrintConfidence(results_dict)
2286 def _GetViewVCLinkFromDepotAndHash(self, cl, depot):
2287 info = self.source_control.QueryRevisionInfo(cl,
2288 self._GetDepotDirectory(depot))
2289 if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'):
2290 try:
2291 # Format is "git-svn-id: svn://....@123456 <other data>"
2292 svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i]
2293 svn_revision = svn_line[0].split('@')
2294 svn_revision = svn_revision[1].split(' ')[0]
2295 return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision
2296 except IndexError:
2297 return ''
2298 return ''
2300 def _PrintRevisionInfo(self, cl, info, depot=None):
2301 # The perf dashboard specifically looks for the string
2302 # "Author : " to parse out who to cc on a bug. If you change the
2303 # formatting here, please update the perf dashboard as well.
2304 print
2305 print 'Subject : %s' % info['subject']
2306 print 'Author : %s' % info['author']
2307 if not info['email'].startswith(info['author']):
2308 print 'Email : %s' % info['email']
2309 commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
2310 if commit_link:
2311 print 'Link : %s' % commit_link
2312 else:
2313 print
2314 print 'Failed to parse svn revision from body:'
2315 print
2316 print info['body']
2317 print
2318 print 'Commit : %s' % cl
2319 print 'Date : %s' % info['date']
  def _PrintTestedCommitsTable(self, revision_data_sorted,
      first_working_revision, last_broken_revision, confidence,
      final_step=True):
    """Prints a table of every tested commit with its mean, error and state.

    Args:
      revision_data_sorted: (revision id, data) pairs in bisect sort order.
      first_working_revision: Id of the earliest known-good revision.
      last_broken_revision: Id of the latest known-bad revision.
      confidence: Confidence percentage; a falsy value suppresses the
          Good/Bad/Suspected labels and the focusing blank lines.
      final_step: True when printing the final table, False for partial
          mid-bisect results.
    """
    print
    if final_step:
      print 'Tested commits:'
    else:
      print 'Partial results:'
    # Header row; each column is centered to a fixed width so the data rows
    # below line up with it.
    print ' %20s %70s %12s %14s %13s' % ('Depot'.center(20, ' '),
        'Commit SHA'.center(70, ' '), 'Mean'.center(12, ' '),
        'Std. Error'.center(14, ' '), 'State'.center(13, ' '))
    # state tracks where we are relative to the suspected range:
    # 0 = still in the broken range, 1 = hit the last broken revision,
    # 2 = reached the first working revision (good from here on).
    state = 0
    for current_id, current_data in revision_data_sorted:
      if current_data['value']:
        if (current_id == last_broken_revision or
            current_id == first_working_revision):
          # If confidence is too low, don't add this empty line since it's
          # used to put focus on a suspected CL.
          if confidence and final_step:
            print
          state += 1
          if state == 2 and not final_step:
            # Just want a separation between "bad" and "good" cl's.
            print

        state_str = 'Bad'
        if state == 1 and final_step:
          state_str = 'Suspected CL'
        elif state == 2:
          state_str = 'Good'

        # If confidence is too low, don't bother outputting good/bad.
        if not confidence:
          state_str = ''
        state_str = state_str.center(13, ' ')

        std_error = ('+-%.02f' %
            current_data['value']['std_err']).center(14, ' ')
        mean = ('%.02f' % current_data['value']['mean']).center(12, ' ')
        # Prefer a viewvc URL; fall back to the raw hash if none exists.
        cl_link = self._GetViewVCLinkFromDepotAndHash(current_id,
            current_data['depot'])
        if not cl_link:
          cl_link = current_id
        print ' %20s %70s %12s %14s %13s' % (
            current_data['depot'].center(20, ' '), cl_link.center(70, ' '),
            mean, std_error, state_str)
2368 def _PrintReproSteps(self):
2369 print
2370 print 'To reproduce locally:'
2371 print '$ ' + self.opts.command
2372 if bisect_utils.IsTelemetryCommand(self.opts.command):
2373 print
2374 print 'Also consider passing --profiler=list to see available profilers.'
2376 def _PrintOtherRegressions(self, other_regressions, revision_data):
2377 print
2378 print 'Other regressions may have occurred:'
2379 print ' %8s %70s %10s' % ('Depot'.center(8, ' '),
2380 'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
2381 for regression in other_regressions:
2382 current_id, previous_id, confidence = regression
2383 current_data = revision_data[current_id]
2384 previous_data = revision_data[previous_id]
2386 current_link = self._GetViewVCLinkFromDepotAndHash(current_id,
2387 current_data['depot'])
2388 previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id,
2389 previous_data['depot'])
2391 # If we can't map it to a viewable URL, at least show the original hash.
2392 if not current_link:
2393 current_link = current_id
2394 if not previous_link:
2395 previous_link = previous_id
2397 print ' %8s %70s %s' % (
2398 current_data['depot'], current_link,
2399 ('%d%%' % confidence).center(10, ' '))
2400 print ' %8s %70s' % (
2401 previous_data['depot'], previous_link)
2402 print
2404 def _PrintStepTime(self, revision_data_sorted):
2405 step_perf_time_avg = 0.0
2406 step_build_time_avg = 0.0
2407 step_count = 0.0
2408 for _, current_data in revision_data_sorted:
2409 if current_data['value']:
2410 step_perf_time_avg += current_data['perf_time']
2411 step_build_time_avg += current_data['build_time']
2412 step_count += 1
2413 if step_count:
2414 step_perf_time_avg = step_perf_time_avg / step_count
2415 step_build_time_avg = step_build_time_avg / step_count
2416 print
2417 print 'Average build time : %s' % datetime.timedelta(
2418 seconds=int(step_build_time_avg))
2419 print 'Average test time : %s' % datetime.timedelta(
2420 seconds=int(step_perf_time_avg))
2422 def _PrintWarnings(self):
2423 if not self.warnings:
2424 return
2425 print
2426 print 'WARNINGS:'
2427 for w in set(self.warnings):
2428 print ' !!! %s' % w
2430 def _FindOtherRegressions(self, revision_data_sorted, bad_greater_than_good):
2431 other_regressions = []
2432 previous_values = []
2433 previous_id = None
2434 for current_id, current_data in revision_data_sorted:
2435 current_values = current_data['value']
2436 if current_values:
2437 current_values = current_values['values']
2438 if previous_values:
2439 confidence = self._CalculateConfidence(previous_values,
2440 [current_values])
2441 mean_of_prev_runs = CalculateTruncatedMean(
2442 sum(previous_values, []), 0)
2443 mean_of_current_runs = CalculateTruncatedMean(current_values, 0)
2445 # Check that the potential regression is in the same direction as
2446 # the overall regression. If the mean of the previous runs < the
2447 # mean of the current runs, this local regression is in same
2448 # direction.
2449 prev_less_than_current = mean_of_prev_runs < mean_of_current_runs
2450 is_same_direction = (prev_less_than_current if
2451 bad_greater_than_good else not prev_less_than_current)
2453 # Only report potential regressions with high confidence.
2454 if is_same_direction and confidence > 50:
2455 other_regressions.append([current_id, previous_id, confidence])
2456 previous_values.append(current_values)
2457 previous_id = current_id
2458 return other_regressions
2460 def _CalculateConfidence(self, working_means, broken_means):
2461 bounds_working = []
2462 bounds_broken = []
2463 for m in working_means:
2464 current_mean = CalculateTruncatedMean(m, 0)
2465 if bounds_working:
2466 bounds_working[0] = min(current_mean, bounds_working[0])
2467 bounds_working[1] = max(current_mean, bounds_working[0])
2468 else:
2469 bounds_working = [current_mean, current_mean]
2470 for m in broken_means:
2471 current_mean = CalculateTruncatedMean(m, 0)
2472 if bounds_broken:
2473 bounds_broken[0] = min(current_mean, bounds_broken[0])
2474 bounds_broken[1] = max(current_mean, bounds_broken[0])
2475 else:
2476 bounds_broken = [current_mean, current_mean]
2477 dist_between_groups = min(math.fabs(bounds_broken[1] - bounds_working[0]),
2478 math.fabs(bounds_broken[0] - bounds_working[1]))
2479 working_mean = sum(working_means, [])
2480 broken_mean = sum(broken_means, [])
2481 len_working_group = CalculateStandardDeviation(working_mean)
2482 len_broken_group = CalculateStandardDeviation(broken_mean)
2484 confidence = (dist_between_groups / (
2485 max(0.0001, (len_broken_group + len_working_group ))))
2486 confidence = int(min(1.0, max(confidence, 0.0)) * 100.0)
2487 return confidence
  def _GetResultsDict(self, revision_data, revision_data_sorted):
    """Computes the final bisect results from the gathered revision data.

    Args:
      revision_data: Dictionary mapping revision ids to their state (value,
          depot, passed flag, sort key, ...).
      revision_data_sorted: The same data as (id, data) pairs, in bisect
          (sort-key) order.

    Returns:
      A dictionary with the suspected range (first working / last broken
      revision), culprit revision info, other candidate regressions, the
      regression size and standard error, and a confidence percentage.
    """
    # Find range where it possibly broke.
    first_working_revision = None
    first_working_revision_index = -1
    last_broken_revision = None
    last_broken_revision_index = -1

    for i in xrange(len(revision_data_sorted)):
      k, v = revision_data_sorted[i]
      if v['passed'] == 1:
        if not first_working_revision:
          first_working_revision = k
          first_working_revision_index = i

      if not v['passed']:
        last_broken_revision = k
        last_broken_revision_index = i

    if last_broken_revision != None and first_working_revision != None:
      # Gather the raw measurement lists on each side of the suspected break.
      broken_means = []
      for i in xrange(0, last_broken_revision_index + 1):
        if revision_data_sorted[i][1]['value']:
          broken_means.append(revision_data_sorted[i][1]['value']['values'])

      working_means = []
      for i in xrange(first_working_revision_index, len(revision_data_sorted)):
        if revision_data_sorted[i][1]['value']:
          working_means.append(revision_data_sorted[i][1]['value']['values'])

      # Flatten the lists to calculate mean of all values.
      working_mean = sum(working_means, [])
      broken_mean = sum(broken_means, [])

      # Calculate the approximate size of the regression
      mean_of_bad_runs = CalculateTruncatedMean(broken_mean, 0.0)
      mean_of_good_runs = CalculateTruncatedMean(working_mean, 0.0)

      regression_size = math.fabs(max(mean_of_good_runs, mean_of_bad_runs) /
          max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0 - 100.0

      regression_std_err = math.fabs(CalculatePooledStandardError(
          [working_mean, broken_mean]) /
          max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0

      # Give a "confidence" in the bisect. At the moment we use how distinct the
      # values are before and after the last broken revision, and how noisy the
      # overall graph is.
      confidence = self._CalculateConfidence(working_means, broken_means)

      culprit_revisions = []

      # Remember where we are; the per-depot queries below chdir around.
      cwd = os.getcwd()
      self.ChangeToDepotWorkingDirectory(
          revision_data[last_broken_revision]['depot'])

      if revision_data[last_broken_revision]['depot'] == 'cros':
        # Want to get a list of all the commits and what depots they belong
        # to so that we can grab info about each.
        cmd = ['repo', 'forall', '-c',
            'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
            last_broken_revision, first_working_revision + 1)]
        (output, return_code) = RunProcessAndRetrieveOutput(cmd)

        changes = []
        assert not return_code, 'An error occurred while running'\
                                ' "%s"' % ' '.join(cmd)
        last_depot = None
        cwd = os.getcwd()
        for l in output.split('\n'):
          if l:
            # Output will be in form:
            # /path_to_depot
            # /path_to_other_depot
            # <SHA1>
            # /path_again
            # <SHA1>
            # etc.
            if l[0] == '/':
              last_depot = l
            else:
              contents = l.split(' ')
              if len(contents) > 1:
                changes.append([last_depot, contents[0]])
        for c in changes:
          os.chdir(c[0])
          info = self.source_control.QueryRevisionInfo(c[1])
          culprit_revisions.append((c[1], info, None))
      else:
        # Walk from the last broken revision up to (but excluding) the first
        # working one; each of these commits is a potential culprit.
        for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
          k, v = revision_data_sorted[i]
          if k == first_working_revision:
            break
          self.ChangeToDepotWorkingDirectory(v['depot'])
          info = self.source_control.QueryRevisionInfo(k)
          culprit_revisions.append((k, info, v['depot']))
      os.chdir(cwd)

      # Check for any other possible regression ranges
      other_regressions = self._FindOtherRegressions(revision_data_sorted,
          mean_of_bad_runs > mean_of_good_runs)

      # Check for warnings:
      if len(culprit_revisions) > 1:
        self.warnings.append('Due to build errors, regression range could '
                             'not be narrowed down to a single commit.')
      if self.opts.repeat_test_count == 1:
        self.warnings.append('Tests were only set to run once. This may '
                             'be insufficient to get meaningful results.')
      if confidence < 100:
        if confidence:
          self.warnings.append(
              'Confidence is less than 100%. There could be other candidates for '
              'this regression. Try bisecting again with increased repeat_count '
              'or on a sub-metric that shows the regression more clearly.')
        else:
          self.warnings.append(
              'Confidence is 0%. Try bisecting again on another platform, with '
              'increased repeat_count or on a sub-metric that shows the regression '
              'more clearly.')

      return {
          'first_working_revision': first_working_revision,
          'last_broken_revision': last_broken_revision,
          'culprit_revisions': culprit_revisions,
          'other_regressions': other_regressions,
          'regression_size': regression_size,
          'regression_std_err': regression_std_err,
          'confidence': confidence,
2619 def FormatAndPrintResults(self, bisect_results):
2620 """Prints the results from a bisection run in a readable format.
2622 Args
2623 bisect_results: The results from a bisection test run.
2625 revision_data = bisect_results['revision_data']
2626 revision_data_sorted = sorted(revision_data.iteritems(),
2627 key = lambda x: x[1]['sort'])
2628 results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
2630 if self.opts.output_buildbot_annotations:
2631 bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')
2633 print
2634 print 'Full results of bisection:'
2635 for current_id, current_data in revision_data_sorted:
2636 build_status = current_data['passed']
2638 if type(build_status) is bool:
2639 if build_status:
2640 build_status = 'Good'
2641 else:
2642 build_status = 'Bad'
2644 print ' %20s %40s %s' % (current_data['depot'],
2645 current_id, build_status)
2646 print
2648 if self.opts.output_buildbot_annotations:
2649 bisect_utils.OutputAnnotationStepClosed()
2650 # The perf dashboard scrapes the "results" step in order to comment on
2651 # bugs. If you change this, please update the perf dashboard as well.
2652 bisect_utils.OutputAnnotationStepStart('Results')
2654 if results_dict['culprit_revisions'] and results_dict['confidence']:
2655 self._PrintBanner(results_dict)
2656 for culprit in results_dict['culprit_revisions']:
2657 cl, info, depot = culprit
2658 self._PrintRevisionInfo(cl, info, depot)
2659 self._PrintReproSteps()
2660 if results_dict['other_regressions']:
2661 self._PrintOtherRegressions(results_dict['other_regressions'],
2662 revision_data)
2663 else:
2664 self._PrintFailedBanner(results_dict)
2665 self._PrintReproSteps()
2667 self._PrintTestedCommitsTable(revision_data_sorted,
2668 results_dict['first_working_revision'],
2669 results_dict['last_broken_revision'],
2670 results_dict['confidence'])
2671 self._PrintStepTime(revision_data_sorted)
2672 self._PrintWarnings()
2674 if self.opts.output_buildbot_annotations:
2675 bisect_utils.OutputAnnotationStepClosed()
def DetermineAndCreateSourceControl(opts):
  """Attempts to determine the underlying source control workflow and returns
  a SourceControl object.

  Args:
    opts: The options parsed from the command line.

  Returns:
    An instance of a SourceControl object, or None if the current workflow
    is unsupported.
  """
  # Only a git checkout is supported at the moment; probe for one.
  (output, _) = RunGit(['rev-parse', '--is-inside-work-tree'])
  if output.strip() == 'true':
    return GitSourceControl(opts)
  return None
def IsPlatformSupported(opts):
  """Checks that this platform and build system are supported.

  Args:
    opts: The options parsed from the command line.

  Returns:
    True if the platform and build system are supported.
  """
  # Haven't tested the script out on any other platforms yet.
  return os.name in ('posix', 'nt')
def RmTreeAndMkDir(path_to_dir):
  """Removes the directory tree specified, and then creates an empty
  directory in the same location.

  Args:
    path_to_dir: Path to the directory tree.

  Returns:
    True if successful, False if an error occurred.
  """
  try:
    if os.path.exists(path_to_dir):
      shutil.rmtree(path_to_dir)
  except OSError as e:  # 'except OSError, e' is Python-2-only syntax.
    # A tree that vanished out from under us is fine; anything else fails.
    if e.errno != errno.ENOENT:
      return False

  try:
    os.makedirs(path_to_dir)
  except OSError as e:
    # Another process creating the directory first still counts as success.
    if e.errno != errno.EEXIST:
      return False

  return True
def RemoveBuildFiles():
  """Removes build files from previous runs."""
  release_out = os.path.join('out', 'Release')
  release_build = os.path.join('build', 'Release')
  # Short-circuits exactly like the original nested ifs: the second tree is
  # only recreated when the first one was recreated successfully.
  return (RmTreeAndMkDir(release_out) and
          RmTreeAndMkDir(release_build))
class BisectOptions(object):
  """Options to be used when running bisection."""

  def __init__(self):
    super(BisectOptions, self).__init__()

    # Platform and build configuration.
    self.target_platform = 'chromium'
    self.build_preference = None
    # Revision range to bisect over.
    self.good_revision = None
    self.bad_revision = None
    self.use_goma = None
    self.cros_board = None
    self.cros_remote_ip = None
    # Test execution tuning; clamped when parsed (see ParseCommandLine).
    self.repeat_test_count = 20
    self.truncate_percent = 25
    self.max_time_minutes = 20
    self.metric = None
    self.command = None
    self.output_buildbot_annotations = None
    self.no_custom_deps = False
    self.working_directory = None
    self.extra_src = None
    # Debug flags to skip expensive stages during development.
    self.debug_ignore_build = None
    self.debug_ignore_sync = None
    self.debug_ignore_perf_test = None

  def _CreateCommandLineParser(self):
    """Creates a parser with bisect options.

    Returns:
      An instance of optparse.OptionParser.
    """
    # Typo fix: "peformance" -> "performance" in the user-visible usage text.
    usage = ('%prog [options] [-- chromium-options]\n'
             'Perform binary search on revision history to find a minimal '
             'range of revisions where a performance metric regressed.\n')

    parser = optparse.OptionParser(usage=usage)

    group = optparse.OptionGroup(parser, 'Bisect options')
    group.add_option('-c', '--command',
                     type='str',
                     help='A command to execute your performance test at' +
                     ' each point in the bisection.')
    group.add_option('-b', '--bad_revision',
                     type='str',
                     help='A bad revision to start bisection. ' +
                     'Must be later than good revision. May be either a git' +
                     ' or svn revision.')
    group.add_option('-g', '--good_revision',
                     type='str',
                     help='A revision to start bisection where performance' +
                     ' test is known to pass. Must be earlier than the ' +
                     'bad revision. May be either a git or svn revision.')
    group.add_option('-m', '--metric',
                     type='str',
                     help='The desired metric to bisect on. For example ' +
                     '"vm_rss_final_b/vm_rss_f_b"')
    group.add_option('-r', '--repeat_test_count',
                     type='int',
                     default=20,
                     help='The number of times to repeat the performance '
                     'test. Values will be clamped to range [1, 100]. '
                     'Default value is 20.')
    group.add_option('--max_time_minutes',
                     type='int',
                     default=20,
                     help='The maximum time (in minutes) to take running the '
                     'performance tests. The script will run the performance '
                     'tests according to --repeat_test_count, so long as it '
                     'doesn\'t exceed --max_time_minutes. Values will be '
                     'clamped to range [1, 60].'
                     'Default value is 20.')
    group.add_option('-t', '--truncate_percent',
                     type='int',
                     default=25,
                     help='The highest/lowest % are discarded to form a '
                     'truncated mean. Values will be clamped to range [0, '
                     '25]. Default value is 25 (highest/lowest 25% will be '
                     'discarded).')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Build options')
    group.add_option('-w', '--working_directory',
                     type='str',
                     help='Path to the working directory where the script '
                     'will do an initial checkout of the chromium depot. The '
                     'files will be placed in a subdirectory "bisect" under '
                     'working_directory and that will be used to perform the '
                     'bisection. This parameter is optional, if it is not '
                     'supplied, the script will work from the current depot.')
    group.add_option('--build_preference',
                     type='choice',
                     choices=['msvs', 'ninja', 'make'],
                     help='The preferred build system to use. On linux/mac '
                     'the options are make/ninja. On Windows, the options '
                     'are msvs/ninja.')
    group.add_option('--target_platform',
                     type='choice',
                     choices=['chromium', 'cros', 'android', 'android-chrome'],
                     default='chromium',
                     help='The target platform. Choices are "chromium" '
                     '(current platform), "cros", or "android". If you '
                     'specify something other than "chromium", you must be '
                     'properly set up to build that platform.')
    group.add_option('--no_custom_deps',
                     dest='no_custom_deps',
                     action="store_true",
                     default=False,
                     help='Run the script with custom_deps or not.')
    group.add_option('--extra_src',
                     type='str',
                     help='Path to a script which can be used to modify '
                     'the bisect script\'s behavior.')
    group.add_option('--cros_board',
                     type='str',
                     help='The cros board type to build.')
    group.add_option('--cros_remote_ip',
                     type='str',
                     help='The remote machine to image to.')
    group.add_option('--use_goma',
                     action="store_true",
                     help='Add a bunch of extra threads for goma.')
    group.add_option('--output_buildbot_annotations',
                     action="store_true",
                     help='Add extra annotation output for buildbot.')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Debug options')
    group.add_option('--debug_ignore_build',
                     action="store_true",
                     help='DEBUG: Don\'t perform builds.')
    group.add_option('--debug_ignore_sync',
                     action="store_true",
                     help='DEBUG: Don\'t perform syncs.')
    group.add_option('--debug_ignore_perf_test',
                     action="store_true",
                     help='DEBUG: Don\'t perform performance tests.')
    parser.add_option_group(group)

    return parser

  def ParseCommandLine(self):
    """Parses the command line for bisect options.

    Raises:
      RuntimeError: A required parameter is missing or invalid; the message
          includes the parser's help output.
    """
    parser = self._CreateCommandLineParser()
    # Positional args are unused; '--' chromium-options pass through untouched.
    (opts, _) = parser.parse_args()

    try:
      if not opts.command:
        raise RuntimeError('missing required parameter: --command')

      if not opts.good_revision:
        raise RuntimeError('missing required parameter: --good_revision')

      if not opts.bad_revision:
        raise RuntimeError('missing required parameter: --bad_revision')

      if not opts.metric:
        raise RuntimeError('missing required parameter: --metric')

      if opts.target_platform == 'cros':
        # Run sudo up front to make sure credentials are cached for later.
        print('Sudo is required to build cros:')
        print('')
        RunProcess(['sudo', 'true'])

        if not opts.cros_board:
          raise RuntimeError('missing required parameter: --cros_board')

        if not opts.cros_remote_ip:
          raise RuntimeError('missing required parameter: --cros_remote_ip')

        if not opts.working_directory:
          raise RuntimeError('missing required parameter: --working_directory')

      metric_values = opts.metric.split('/')
      if len(metric_values) != 2:
        raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)

      # Normalize/clamp the numeric options to their documented ranges.
      opts.metric = metric_values
      opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
      opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
      opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
      opts.truncate_percent = opts.truncate_percent / 100.0

      # iteritems() is Python-2-only; items() behaves the same here.
      for k, v in opts.__dict__.items():
        assert hasattr(self, k), "Invalid %s attribute in BisectOptions." % k
        setattr(self, k, v)
    except RuntimeError as e:  # 'except RuntimeError, e' is Python-2-only.
      output_string = StringIO.StringIO()
      parser.print_help(file=output_string)
      # str(e) replaces the deprecated e.message attribute.
      error_message = '%s\n\n%s' % (str(e), output_string.getvalue())
      output_string.close()
      raise RuntimeError(error_message)

  @staticmethod
  def FromDict(values):
    """Creates an instance of BisectOptions with the values parsed from a
    .cfg file.

    Args:
      values: a dict containing options to set.

    Returns:
      An instance of BisectOptions.
    """
    opts = BisectOptions()

    for k, v in values.items():
      assert hasattr(opts, k), 'Invalid %s attribute in '\
          'BisectOptions.' % k
      setattr(opts, k, v)

    metric_values = opts.metric.split('/')
    if len(metric_values) != 2:
      raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)

    # Apply the same normalization/clamping as ParseCommandLine.
    opts.metric = metric_values
    opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
    opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
    opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
    opts.truncate_percent = opts.truncate_percent / 100.0

    return opts
2968 def main():
2970 try:
2971 opts = BisectOptions()
2972 parse_results = opts.ParseCommandLine()
2974 if opts.extra_src:
2975 extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
2976 if not extra_src:
2977 raise RuntimeError("Invalid or missing --extra_src.")
2978 _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())
2980 if opts.working_directory:
2981 custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
2982 if opts.no_custom_deps:
2983 custom_deps = None
2984 bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)
2986 os.chdir(os.path.join(os.getcwd(), 'src'))
2988 if not RemoveBuildFiles():
2989 raise RuntimeError('Something went wrong removing the build files.')
2991 if not IsPlatformSupported(opts):
2992 raise RuntimeError("Sorry, this platform isn't supported yet.")
2994 # Check what source control method they're using. Only support git workflow
2995 # at the moment.
2996 source_control = DetermineAndCreateSourceControl(opts)
2998 if not source_control:
2999 raise RuntimeError("Sorry, only the git workflow is supported at the "
3000 "moment.")
3002 # gClient sync seems to fail if you're not in master branch.
3003 if (not source_control.IsInProperBranch() and
3004 not opts.debug_ignore_sync and
3005 not opts.working_directory):
3006 raise RuntimeError("You must switch to master branch to run bisection.")
3008 bisect_test = BisectPerformanceMetrics(source_control, opts)
3009 try:
3010 bisect_results = bisect_test.Run(opts.command,
3011 opts.bad_revision,
3012 opts.good_revision,
3013 opts.metric)
3014 if bisect_results['error']:
3015 raise RuntimeError(bisect_results['error'])
3016 bisect_test.FormatAndPrintResults(bisect_results)
3017 return 0
3018 finally:
3019 bisect_test.PerformCleanup()
3020 except RuntimeError, e:
3021 if opts.output_buildbot_annotations:
3022 # The perf dashboard scrapes the "results" step in order to comment on
3023 # bugs. If you change this, please update the perf dashboard as well.
3024 bisect_utils.OutputAnnotationStepStart('Results')
3025 print 'Error: %s' % e.message
3026 if opts.output_buildbot_annotations:
3027 bisect_utils.OutputAnnotationStepClosed()
3028 return 1
# Script entry point: propagate main()'s return code as the process exit
# status (0 on success, 1 on error).
if __name__ == '__main__':
  sys.exit(main())