2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
further bisect changes to those depots and attempt to narrow down the revision
range where the regression may have occurred.
17 An example usage (using svn cl's):
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
28 An example usage (using git hashes):
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
56 # The additional repositories that might need to be bisected.
57 # If the repository has any dependant repositories (such as skia/src needs
58 # skia/include and skia/gyp to be updated), specify them in the 'depends'
59 # so that they're synced appropriately.
61 # src: path to the working directory.
62 # recurse: True if this repositry will get bisected.
63 # depends: A list of other repositories that are actually part of the same
65 # svn: Needed for git workflow to resolve hashes to svn revisions.
66 # from: Parent depot that must be bisected before this is bisected.
73 'viewvc': 'http://src.chromium.org/viewvc/chrome?view=revision&revision='
76 "src" : "src/third_party/WebKit",
80 'viewvc': 'http://src.chromium.org/viewvc/blink?view=revision&revision='
83 "src" : "src/third_party/angle_dx11",
84 "src_old" : "src/third_party/angle",
95 "custom_deps": bisect_utils
.GCLIENT_CUSTOM_DEPS_V8
,
96 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
98 'v8_bleeding_edge' : {
99 "src" : "src/v8_bleeding_edge",
102 "svn": "https://v8.googlecode.com/svn/branches/bleeding_edge",
104 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
107 "src" : "src/third_party/skia/src",
109 "svn" : "http://skia.googlecode.com/svn/trunk/src",
110 "depends" : ['skia/include', 'skia/gyp'],
112 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
115 "src" : "src/third_party/skia/include",
117 "svn" : "http://skia.googlecode.com/svn/trunk/include",
120 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
123 "src" : "src/third_party/skia/gyp",
125 "svn" : "http://skia.googlecode.com/svn/trunk/gyp",
128 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
# Names of all depots that can be bisected (the keys of DEPOT_DEPS_NAME).
DEPOT_NAMES = DEPOT_DEPS_NAME.keys()

# Path to the cros_sdk helper, relative to the 'src' working directory.
CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')

# git-log grep pattern used to locate ChromeOS version-bump commits.
CROS_VERSION_PATTERN = 'new version number from %s'

# Portage package name for the ChromeOS Chrome ebuild.
CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'

# SSH keys used when talking to a ChromeOS test device.
# NOTE(review): the final path component was missing from the fragmented
# source; 'testing_rsa' matches the chromite/ssh_keys layout -- confirm.
CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
                                  'testing_rsa')
CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
                                    'mod_for_test_scripts', 'ssh_keys',
                                    'testing_rsa')

# Outcome codes returned by the bisect build steps.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2
def CalculateTruncatedMean(data_set, truncate_percent):
  """Calculates the truncated mean of a set of values.

  Args:
    data_set: Set of values to use in calculation.
    truncate_percent: The % from the upper/lower portions of the data set to
        discard, expressed as a value in [0, 1].

  Returns:
    The truncated mean as a float.
  """
  if len(data_set) > 2:
    data_set = sorted(data_set)

    discard_num_float = len(data_set) * truncate_percent
    discard_num_int = int(math.floor(discard_num_float))
    kept_weight = len(data_set) - discard_num_float * 2

    data_set = data_set[discard_num_int:len(data_set) - discard_num_int]

    weight_left = 1.0 - (discard_num_float - discard_num_int)

    if weight_left < 1:
      # If the % to discard leaves a fractional portion, need to weight those
      # border values by how much of each should actually be kept.
      unweighted_vals = data_set[1:len(data_set) - 1]
      weighted_vals = [data_set[0], data_set[len(data_set) - 1]]
      weighted_vals = [w * weight_left for w in weighted_vals]
      data_set = weighted_vals + unweighted_vals
  else:
    # Too few samples to truncate anything: plain mean over all of them.
    kept_weight = len(data_set)

  # sum() over floats replaces the original reduce(lambda x, y: ...) fold:
  # identical arithmetic, clearer, and works on both python 2 and 3.
  truncated_mean = sum(float(x) for x in data_set) / kept_weight

  return truncated_mean
def CalculateStandardDeviation(v):
  """Calculates the sample standard deviation of the given list of values.

  Args:
    v: List of numeric values (or numeric strings convertible via float()).

  Returns:
    The sample standard deviation (N-1 denominator) as a float.
  """
  if len(v) == 1:
    # A single sample has no spread; guard the (len(v) - 1) == 0 division.
    return 0.0

  # The original computed the mean via CalculateTruncatedMean(v, 0.0); with a
  # 0% truncation that is exactly the arithmetic mean, computed inline here.
  mean = sum(float(x) for x in v) / len(v)
  variances = [float(x) - mean for x in v]
  variances = [x * x for x in variances]
  variance = sum(variances) / (len(v) - 1)
  std_dev = math.sqrt(variance)

  return std_dev
def CalculatePooledStandardError(work_sets):
  """Calculates the pooled standard error across multiple sets of samples.

  Args:
    work_sets: A list of lists of numeric values.

  Returns:
    The pooled standard error as a float, or 0.0 when it is undefined
    (e.g. every set has fewer than two samples).
  """
  numerator = 0.0
  denominator1 = 0.0
  denominator2 = 0.0

  for current_set in work_sets:
    std_dev = CalculateStandardDeviation(current_set)

    numerator += (len(current_set) - 1) * std_dev ** 2
    denominator1 += len(current_set) - 1
    denominator2 += 1.0 / len(current_set)

  # Guard against division by zero when all sets are singletons.
  if denominator1:
    return math.sqrt(numerator / denominator1) * math.sqrt(denominator2)
  return 0.0
def CalculateStandardError(v):
  """Calculates the standard error of the mean for the given values.

  Args:
    v: List of numeric values.

  Returns:
    std_dev / sqrt(N) as a float, or 0.0 for fewer than two samples.
  """
  if len(v) <= 1:
    # Standard error is undefined for a single sample; report no error.
    return 0.0

  std_dev = CalculateStandardDeviation(v)

  return std_dev / math.sqrt(len(v))
def IsStringFloat(string_to_check):
  """Checks whether or not the given string can be converted to a floating
  point number.

  Args:
    string_to_check: Input string to check if it can be converted to a float.

  Returns:
    True if the string can be converted to a float.
  """
  try:
    float(string_to_check)

    return True
  except ValueError:
    return False
def IsStringInt(string_to_check):
  """Checks whether or not the given string can be converted to a integer.

  Args:
    string_to_check: Input string to check if it can be converted to an int.

  Returns:
    True if the string can be converted to an int.
  """
  try:
    int(string_to_check)

    return True
  except ValueError:
    return False
def IsWindows():
  """Checks whether or not the script is running on Windows.

  Returns:
    True if running on Windows.
  """
  return os.name == 'nt'
def RunProcess(command):
  """Run an arbitrary command. If output from the call is needed, use
  RunProcessAndRetrieveOutput instead.

  Args:
    command: A list containing the command and args to execute.

  Returns:
    The return code of the call.
  """
  # On Windows, use shell=True to get PATH interpretation.
  shell = IsWindows()
  return subprocess.call(command, shell=shell)
def RunProcessAndRetrieveOutput(command):
  """Run an arbitrary command, returning its output and return code. Since
  output is collected via communicate(), there will be no output until the
  call terminates. If you need output while the program runs (ie. so
  that the buildbot doesn't terminate the script), consider RunProcess().

  Args:
    command: A list containing the command and args to execute.

  Returns:
    A tuple of the output and return code.
  """
  # (The original docstring documented a 'print_output' parameter that does
  # not exist in the signature; that stale entry has been removed.)
  # On Windows, use shell=True to get PATH interpretation.
  shell = IsWindows()
  proc = subprocess.Popen(command,
                          shell=shell,
                          stdout=subprocess.PIPE)

  (output, _) = proc.communicate()

  return (output, proc.returncode)
def RunGit(command):
  """Run a git subcommand, returning its output and return code.

  Args:
    command: A list containing the args to git.

  Returns:
    A tuple of the output and return code.
  """
  command = ['git'] + command

  return RunProcessAndRetrieveOutput(command)
def CheckRunGit(command):
  """Run a git subcommand, returning its output. Asserts if
  the return code of the call is non-zero.

  Args:
    command: A list containing the args to git.

  Returns:
    The output of the call (the stale "tuple" wording in the original
    docstring did not match the actual single return value).
  """
  (output, return_code) = RunGit(command)

  assert not return_code, 'An error occurred while running'\
                          ' "git %s"' % ' '.join(command)
  return output
def SetBuildSystemDefault(build_system):
  """Sets up any environment variables needed to build with the specified build
  system.

  Args:
    build_system: A string specifying build system. Currently only 'ninja' or
        'make' are supported.

  Raises:
    RuntimeError: An unsupported build system was requested.
  """
  if build_system == 'ninja':
    gyp_var = os.getenv('GYP_GENERATORS')

    if not gyp_var or not 'ninja' in gyp_var:
      if gyp_var:
        # Preserve whatever generators were already configured.
        os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
      else:
        os.environ['GYP_GENERATORS'] = 'ninja'

      if IsWindows():
        # NOTE(review): the tail of this GYP_DEFINES value was missing from
        # the fragmented source; 'chromium_win_pch=0' matches upstream usage
        # of these flags -- confirm against the original file.
        os.environ['GYP_DEFINES'] = 'component=shared_library '\
            'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
            'chromium_win_pch=0'
  elif build_system == 'make':
    os.environ['GYP_GENERATORS'] = 'make'
  else:
    raise RuntimeError('%s build not supported.' % build_system)
def BuildWithMake(threads, targets):
  """Builds the given targets with make.

  Args:
    threads: Number of parallel jobs, or a falsy value for make's default.
    targets: List of make targets to build.

  Returns:
    True if the build succeeded (make returned 0).
  """
  cmd = ['make', 'BUILDTYPE=Release']

  if threads:
    cmd.append('-j%d' % threads)

  cmd += targets

  return_code = RunProcess(cmd)

  return not return_code
def BuildWithNinja(threads, targets):
  """Builds the given targets with ninja in out/Release.

  Args:
    threads: Number of parallel jobs, or a falsy value for ninja's default.
    targets: List of ninja targets to build.

  Returns:
    True if the build succeeded (ninja returned 0).
  """
  cmd = ['ninja', '-C', os.path.join('out', 'Release')]

  if threads:
    cmd.append('-j%d' % threads)

  cmd += targets

  return_code = RunProcess(cmd)

  return not return_code
def BuildWithVisualStudio(targets):
  """Builds the given projects of chrome.sln with Visual Studio's devenv.

  Args:
    targets: List of project names to pass via /Project.

  Returns:
    True if the build succeeded (devenv returned 0).
  """
  # VS100COMNTOOLS points inside the VS install; devenv.com lives in ../IDE.
  path_to_devenv = os.path.abspath(
      os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
  path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
  cmd = [path_to_devenv, '/build', 'Release', path_to_sln]

  for t in targets:
    cmd.extend(['/Project', t])

  return_code = RunProcess(cmd)

  return not return_code
class Builder(object):
  """Builder is used by the bisect script to build relevant targets and deploy.
  """
  def __init__(self, opts):
    """Performs setup for building with target build system.

    Args:
      opts: Options parsed from command line.

    Raises:
      RuntimeError: The build system or platform environment could not be
          configured.
    """
    if IsWindows():
      if not opts.build_preference:
        opts.build_preference = 'msvs'

      if opts.build_preference == 'msvs':
        if not os.getenv('VS100COMNTOOLS'):
          raise RuntimeError(
              'Path to visual studio could not be determined.')
      else:
        SetBuildSystemDefault(opts.build_preference)
    else:
      if not opts.build_preference:
        # Default to '' so the membership test cannot raise a TypeError
        # when GYP_GENERATORS is unset (os.getenv would return None).
        if 'ninja' in os.getenv('GYP_GENERATORS', ''):
          opts.build_preference = 'ninja'
        else:
          opts.build_preference = 'make'

      SetBuildSystemDefault(opts.build_preference)

    if not bisect_utils.SetupPlatformBuildEnvironment(opts):
      raise RuntimeError('Failed to set platform environment.')

    bisect_utils.RunGClient(['runhooks'])

  @staticmethod
  def FromOpts(opts):
    """Constructs and returns the Builder subclass for the target platform."""
    builder = None
    if opts.target_platform == 'cros':
      builder = CrosBuilder(opts)
    elif opts.target_platform == 'android':
      builder = AndroidBuilder(opts)
    else:
      builder = DesktopBuilder(opts)
    return builder

  def Build(self, depot, opts):
    # Subclasses implement the platform-specific build.
    raise NotImplementedError()
class DesktopBuilder(Builder):
  """DesktopBuilder is used to build Chromium on linux/mac/windows."""
  def __init__(self, opts):
    super(DesktopBuilder, self).__init__(opts)

  def Build(self, depot, opts):
    """Builds chromium_builder_perf target using options passed into
    the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    targets = ['chromium_builder_perf']

    # NOTE(review): thread-count selection reconstructed from fragmented
    # source; confirm the goma default against the original file.
    threads = 16
    if opts.use_goma:
      threads = 64

    build_success = False
    if opts.build_preference == 'make':
      build_success = BuildWithMake(threads, targets)
    elif opts.build_preference == 'ninja':
      build_success = BuildWithNinja(threads, targets)
    elif opts.build_preference == 'msvs':
      assert IsWindows(), 'msvs is only supported on Windows.'
      build_success = BuildWithVisualStudio(targets)
    else:
      assert False, 'No build system defined.'

    return build_success
class AndroidBuilder(Builder):
  """AndroidBuilder is used to build on android."""
  def __init__(self, opts):
    super(AndroidBuilder, self).__init__(opts)

  def InstallAPK(self, opts):
    """Installs apk to device.

    Args:
      opts: The options parsed from the command line.

    Returns:
      True if successful.
    """
    path_to_tool = os.path.join('build', 'android', 'adb_install_apk.py')
    cmd = [path_to_tool, '--apk', 'ChromiumTestShell.apk', '--apk_package',
        'org.chromium.chrome.testshell', '--release']
    return_code = RunProcess(cmd)

    return not return_code

  def Build(self, depot, opts):
    """Builds the android content shell and other necessary tools using options
    passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    targets = ['chromium_testshell', 'cc_perftests_apk', 'forwarder2', 'md5sum']

    # NOTE(review): thread-count selection reconstructed from fragmented
    # source; confirm the goma default against the original file.
    threads = 16
    if opts.use_goma:
      threads = 64

    build_success = False
    if opts.build_preference == 'ninja':
      build_success = BuildWithNinja(threads, targets)
    else:
      assert False, 'No build system defined.'

    # The build is only useful if the apk actually lands on the device.
    if build_success:
      build_success = self.InstallAPK(opts)

    return build_success
class CrosBuilder(Builder):
  """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
  target platform."""
  def __init__(self, opts):
    super(CrosBuilder, self).__init__(opts)

  def ImageToTarget(self, opts):
    """Installs latest image to target specified by opts.cros_remote_ip.

    Args:
      opts: Program options containing cros_board and cros_remote_ip.

    Returns:
      True if successful.
    """
    try:
      # Keys will most likely be set to 0640 after wiping the chroot.
      os.chmod(CROS_SCRIPT_KEY_PATH, 0o600)
      os.chmod(CROS_TEST_KEY_PATH, 0o600)
      cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
             '--remote=%s' % opts.cros_remote_ip,
             '--board=%s' % opts.cros_board, '--test', '--verbose']

      return_code = RunProcess(cmd)
      return not return_code
    except OSError:
      # chmod fails when the keys are absent (e.g. freshly wiped chroot).
      return False

  def BuildPackages(self, opts, depot):
    """Builds packages for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    # NOTE(review): the depot-conditional pieces of this command were missing
    # from the fragmented source and were reconstructed -- confirm ordering.
    if depot != 'cros':
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=Release', './build_packages',
        '--board=%s' % opts.cros_board]
    return_code = RunProcess(cmd)

    return not return_code

  def BuildImage(self, opts, depot):
    """Builds test image for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    if depot != 'cros':
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=Release', '--', './build_image',
        '--board=%s' % opts.cros_board, 'test']
    return_code = RunProcess(cmd)

    return not return_code

  def Build(self, depot, opts):
    """Builds targets using options passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    if self.BuildPackages(opts, depot):
      if self.BuildImage(opts, depot):
        return self.ImageToTarget(opts)

    return False
class SourceControl(object):
  """SourceControl is an abstraction over the underlying source control
  system used for chromium. For now only git is supported, but in the
  future, the svn workflow could be added as well."""
  def __init__(self):
    super(SourceControl, self).__init__()

  def SyncToRevisionWithGClient(self, revision):
    """Uses gclient to sync to the specified revision.

    ie. gclient sync --revision <revision>

    Args:
      revision: The git SHA1 or svn CL (depending on workflow).

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunGClient(['sync', '--revision',
        revision, '--verbose', '--nohooks', '--reset', '--force'])

  def SyncToRevisionWithRepo(self, timestamp):
    """Uses repo to sync all the underlying git depots to the specified
    timestamp.

    Args:
      timestamp: The unix timestamp to sync to.

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunRepoSyncAtTimestamp(timestamp)
class GitSourceControl(SourceControl):
  """GitSourceControl is used to query the underlying source control. """
  def __init__(self, opts):
    super(GitSourceControl, self).__init__()
    self.opts = opts

  def IsGit(self):
    # This implementation always fronts a git checkout.
    return True

  def GetRevisionList(self, revision_range_end, revision_range_start):
    """Retrieves a list of revisions between |revision_range_start| and
    |revision_range_end|.

    Args:
      revision_range_end: The SHA1 for the end of the range.
      revision_range_start: The SHA1 for the beginning of the range.

    Returns:
      A list of the revisions between |revision_range_start| and
      |revision_range_end| (inclusive).
    """
    revision_range = '%s..%s' % (revision_range_start, revision_range_end)
    cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
    log_output = CheckRunGit(cmd)

    revision_hash_list = log_output.split()
    # git log excludes the start of the range; append it so the list is
    # inclusive at both ends.
    revision_hash_list.append(revision_range_start)

    return revision_hash_list

  def SyncToRevision(self, revision, sync_client=None):
    """Syncs to the specified revision.

    Args:
      revision: The revision to sync to.
      sync_client: Specifies whether we should sync using gclient ('gclient'),
          repo ('repo'), or use source control directly (None). (The original
          docstring named this parameter 'use_gclient', which does not exist.)

    Returns:
      True if successful.
    """
    if not sync_client:
      results = RunGit(['checkout', revision])[1]
    elif sync_client == 'gclient':
      results = self.SyncToRevisionWithGClient(revision)
    elif sync_client == 'repo':
      results = self.SyncToRevisionWithRepo(revision)

    return not results

  def ResolveToRevision(self, revision_to_check, depot, search):
    """If an SVN revision is supplied, try to resolve it to a git SHA1.

    Args:
      revision_to_check: The user supplied revision string that may need to be
          resolved to a git SHA1.
      depot: The depot the revision_to_check is from.
      search: The number of changelists to try if the first fails to resolve
          to a git hash. If the value is negative, the function will search
          backwards chronologically, otherwise it will search forward.

    Returns:
      A string containing a git SHA1 hash, otherwise None.
    """
    if depot != 'cros':
      if not IsStringInt(revision_to_check):
        # Already a git SHA1; nothing to resolve.
        return revision_to_check

      depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'

      if depot != 'chromium':
        depot_svn = DEPOT_DEPS_NAME[depot]['svn']

      svn_revision = int(revision_to_check)
      git_revision = None

      if search > 0:
        search_range = xrange(svn_revision, svn_revision + search, 1)
      else:
        search_range = xrange(svn_revision, svn_revision + search, -1)

      for i in search_range:
        # git-svn embeds the svn revision in the commit message; grep for it.
        svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
        cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
               'origin/master']

        (log_output, return_code) = RunGit(cmd)

        assert not return_code, 'An error occurred while running'\
                                ' "git %s"' % ' '.join(cmd)

        if not return_code:
          log_output = log_output.strip()

          if log_output:
            git_revision = log_output

            break

      return git_revision
    else:
      if IsStringInt(revision_to_check):
        return int(revision_to_check)
      else:
        # NOTE(review): the cwd save/restore around this chdir was missing
        # from the fragmented source and was reconstructed -- confirm.
        cwd = os.getcwd()
        os.chdir(os.path.join(os.getcwd(), 'src', 'third_party',
            'chromiumos-overlay'))
        pattern = CROS_VERSION_PATTERN % revision_to_check
        cmd = ['log', '--format=%ct', '-1', '--grep', pattern]

        git_revision = None

        log_output = CheckRunGit(cmd)
        if log_output:
          git_revision = log_output
          git_revision = int(log_output.strip())

        os.chdir(cwd)

        return git_revision

  def IsInProperBranch(self):
    """Confirms they're in the master branch for performing the bisection.
    This is needed or gclient will fail to sync properly.

    Returns:
      True if the current branch on src is 'master'
    """
    cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
    log_output = CheckRunGit(cmd)
    log_output = log_output.strip()

    return log_output == "master"

  def SVNFindRev(self, revision):
    """Maps directly to the 'git svn find-rev' command.

    Args:
      revision: The git SHA1 to use.

    Returns:
      An integer changelist #, otherwise None.
    """
    cmd = ['svn', 'find-rev', revision]

    output = CheckRunGit(cmd)
    svn_revision = output.strip()

    if IsStringInt(svn_revision):
      return int(svn_revision)

    return None

  def QueryRevisionInfo(self, revision):
    """Gathers information on a particular revision, such as author's name,
    email, subject, and date.

    Args:
      revision: Revision you want to gather information on.

    Returns:
      A dict in the following format:
      {
        'author': %s,
        'email': %s,
        'date': %s,
        'subject': %s,
        'body': %s,
      }
    """
    commit_info = {}

    formats = ['%cN', '%cE', '%s', '%cD', '%b']
    targets = ['author', 'email', 'subject', 'date', 'body']

    # zip replaces the original index loop over xrange(len(formats)).
    for fmt, target in zip(formats, targets):
      cmd = ['log', '--format=%s' % fmt, '-1', revision]
      output = CheckRunGit(cmd)
      commit_info[target] = output.rstrip()

    return commit_info

  def CheckoutFileAtRevision(self, file_name, revision):
    """Performs a checkout on a file at the given revision.

    Returns:
      True if successful.
    """
    return not RunGit(['checkout', revision, file_name])[1]

  def RevertFileToHead(self, file_name):
    """Unstages a file and returns it to HEAD.

    Returns:
      True if successful.
    """
    # NOTE(review): file_name is accepted but the body always operates on
    # bisect_utils.FILE_DEPS_GIT -- confirm whether this is intentional.
    # Reset doesn't seem to return 0 on success.
    RunGit(['reset', 'HEAD', bisect_utils.FILE_DEPS_GIT])

    return not RunGit(['checkout', bisect_utils.FILE_DEPS_GIT])[1]

  def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
    """Returns a list of commits that modified this file.

    Args:
      filename: Name of file.
      revision_start: Start of revision range.
      revision_end: End of revision range.

    Returns:
      Returns a list of commits that touched this file.
    """
    cmd = ['log', '--format=%H', '%s~1..%s' % (revision_start, revision_end),
           filename]
    output = CheckRunGit(cmd)

    return [o for o in output.split('\n') if o]
class BisectPerformanceMetrics(object):
  """BisectPerformanceMetrics performs a bisection against a list of range
  of revisions to narrow down where performance regressions may have
  occurred."""

  def __init__(self, source_control, opts):
    super(BisectPerformanceMetrics, self).__init__()

    # NOTE(review): several attribute initializations were missing from the
    # fragmented source; self.opts, self.depot_cwd and self.warnings are
    # reconstructed from their uses in later methods -- confirm the set.
    self.opts = opts
    self.source_control = source_control
    self.src_cwd = os.getcwd()
    self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
    self.depot_cwd = {}
    self.cleanup_commands = []
    self.warnings = []
    self.builder = Builder.FromOpts(opts)

    # This always starts true since the script grabs latest first.
    self.was_blink = True

    for d in DEPOT_NAMES:
      # The working directory of each depot is just the path to the depot, but
      # since we're already in 'src', we can skip that part.
      self.depot_cwd[d] = os.path.join(
          self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
914 def PerformCleanup(self
):
915 """Performs cleanup when script is finished."""
916 os
.chdir(self
.src_cwd
)
917 for c
in self
.cleanup_commands
:
919 shutil
.move(c
[1], c
[2])
921 assert False, 'Invalid cleanup command.'
923 def GetRevisionList(self
, depot
, bad_revision
, good_revision
):
924 """Retrieves a list of all the commits between the bad revision and
925 last known good revision."""
927 revision_work_list
= []
930 revision_range_start
= good_revision
931 revision_range_end
= bad_revision
934 self
.ChangeToDepotWorkingDirectory('cros')
936 # Print the commit timestamps for every commit in the revision time
937 # range. We'll sort them and bisect by that. There is a remote chance that
938 # 2 (or more) commits will share the exact same timestamp, but it's
939 # probably safe to ignore that case.
940 cmd
= ['repo', 'forall', '-c',
941 'git log --format=%%ct --before=%d --after=%d' % (
942 revision_range_end
, revision_range_start
)]
943 (output
, return_code
) = RunProcessAndRetrieveOutput(cmd
)
945 assert not return_code
, 'An error occurred while running'\
946 ' "%s"' % ' '.join(cmd
)
950 revision_work_list
= list(set(
951 [int(o
) for o
in output
.split('\n') if IsStringInt(o
)]))
952 revision_work_list
= sorted(revision_work_list
, reverse
=True)
954 revision_work_list
= self
.source_control
.GetRevisionList(bad_revision
,
957 return revision_work_list
959 def Get3rdPartyRevisionsFromCurrentRevision(self
, depot
, revision
):
960 """Parses the DEPS file to determine WebKit/v8/etc... versions.
963 A dict in the format {depot:revision} if successful, otherwise None.
967 self
.ChangeToDepotWorkingDirectory(depot
)
971 if depot
== 'chromium':
972 locals = {'Var': lambda _
: locals["vars"][_
],
973 'From': lambda *args
: None}
974 execfile(bisect_utils
.FILE_DEPS_GIT
, {}, locals)
978 rxp
= re
.compile(".git@(?P<revision>[a-fA-F0-9]+)")
980 for d
in DEPOT_NAMES
:
981 if DEPOT_DEPS_NAME
[d
].has_key('platform'):
982 if DEPOT_DEPS_NAME
[d
]['platform'] != os
.name
:
985 if DEPOT_DEPS_NAME
[d
]['recurse'] and\
986 DEPOT_DEPS_NAME
[d
]['from'] == depot
:
987 if (locals['deps'].has_key(DEPOT_DEPS_NAME
[d
]['src']) or
988 locals['deps'].has_key(DEPOT_DEPS_NAME
[d
]['src_old'])):
989 if locals['deps'].has_key(DEPOT_DEPS_NAME
[d
]['src']):
990 re_results
= rxp
.search(locals['deps'][DEPOT_DEPS_NAME
[d
]['src']])
992 os
.path
.join(self
.src_cwd
, DEPOT_DEPS_NAME
[d
]['src'][4:])
993 elif locals['deps'].has_key(DEPOT_DEPS_NAME
[d
]['src_old']):
995 rxp
.search(locals['deps'][DEPOT_DEPS_NAME
[d
]['src_old']])
997 os
.path
.join(self
.src_cwd
, DEPOT_DEPS_NAME
[d
]['src_old'][4:])
1000 results
[d
] = re_results
.group('revision')
1002 print 'Couldn\'t parse revision for %s.' % d
1006 print 'Couldn\'t find %s while parsing .DEPS.git.' % d
1009 elif depot
== 'cros':
1010 cmd
= [CROS_SDK_PATH
, '--', 'portageq-%s' % self
.opts
.cros_board
,
1011 'best_visible', '/build/%s' % self
.opts
.cros_board
, 'ebuild',
1012 CROS_CHROMEOS_PATTERN
]
1013 (output
, return_code
) = RunProcessAndRetrieveOutput(cmd
)
1015 assert not return_code
, 'An error occurred while running'\
1016 ' "%s"' % ' '.join(cmd
)
1018 if len(output
) > CROS_CHROMEOS_PATTERN
:
1019 output
= output
[len(CROS_CHROMEOS_PATTERN
):]
1022 output
= output
.split('_')[0]
1025 contents
= output
.split('.')
1027 version
= contents
[2]
1029 if contents
[3] != '0':
1030 warningText
= 'Chrome version: %s.%s but using %s.0 to bisect.' %\
1031 (version
, contents
[3], version
)
1032 if not warningText
in self
.warnings
:
1033 self
.warnings
.append(warningText
)
1036 self
.ChangeToDepotWorkingDirectory('chromium')
1037 return_code
= CheckRunGit(['log', '-1', '--format=%H',
1038 '--author=chrome-release@google.com', '--grep=to %s' % version
,
1042 results
['chromium'] = output
.strip()
1044 results
['v8_bleeding_edge'] = None
1046 svn_revision
= self
.source_control
.SVNFindRev(revision
)
1048 if IsStringInt(svn_revision
):
1049 # V8 is tricky to bisect, in that there are only a few instances when
1050 # we can dive into bleeding_edge and get back a meaningful result.
1051 # Try to detect a V8 "business as usual" case, which is when:
1052 # 1. trunk revision N has description "Version X.Y.Z"
1053 # 2. bleeding_edge revision (N-1) has description "Prepare push to
1054 # trunk. Now working on X.Y.(Z+1)."
1055 self
.ChangeToDepotWorkingDirectory(depot
)
1057 revision_info
= self
.source_control
.QueryRevisionInfo(revision
)
1059 version_re
= re
.compile("Version (?P<values>[0-9,.]+)")
1061 regex_results
= version_re
.search(revision_info
['subject'])
1064 version
= regex_results
.group('values')
1066 self
.ChangeToDepotWorkingDirectory('v8_bleeding_edge')
1068 git_revision
= self
.source_control
.ResolveToRevision(
1069 int(svn_revision
) - 1, 'v8_bleeding_edge', -1)
1072 revision_info
= self
.source_control
.QueryRevisionInfo(git_revision
)
1074 if 'Prepare push to trunk' in revision_info
['subject']:
1075 results
['v8_bleeding_edge'] = git_revision
1079 def BuildCurrentRevision(self
, depot
):
1080 """Builds chrome and performance_ui_tests on the current revision.
1083 True if the build was successful.
1085 if self
.opts
.debug_ignore_build
:
1089 os
.chdir(self
.src_cwd
)
1091 build_success
= self
.builder
.Build(depot
, self
.opts
)
1095 return build_success
1097 def RunGClientHooks(self
):
1098 """Runs gclient with runhooks command.
1101 True if gclient reports no errors.
1104 if self
.opts
.debug_ignore_build
:
1107 return not bisect_utils
.RunGClient(['runhooks'])
1109 def TryParseHistogramValuesFromOutput(self
, metric
, text
):
1110 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>.
1113 metric: The metric as a list of [<trace>, <value>] strings.
1114 text: The text to parse the metric values from.
1117 A list of floating point numbers found.
1119 metric_formatted
= 'HISTOGRAM %s: %s= ' % (metric
[0], metric
[1])
1121 text_lines
= text
.split('\n')
1124 for current_line
in text_lines
:
1125 if metric_formatted
in current_line
:
1126 current_line
= current_line
[len(metric_formatted
):]
1129 histogram_values
= eval(current_line
)
1131 for b
in histogram_values
['buckets']:
1132 average_for_bucket
= float(b
['high'] + b
['low']) * 0.5
1133 # Extends the list with N-elements with the average for that bucket.
1134 values_list
.extend([average_for_bucket
] * b
['count'])
1140 def TryParseResultValuesFromOutput(self
, metric
, text
):
1141 """Attempts to parse a metric in the format RESULT <graph: <trace>.
1144 metric: The metric as a list of [<trace>, <value>] strings.
1145 text: The text to parse the metric values from.
1148 A list of floating point numbers found.
1150 # Format is: RESULT <graph>: <trace>= <value> <units>
1151 metric_formatted
= re
.escape('RESULT %s: %s=' % (metric
[0], metric
[1]))
1153 text_lines
= text
.split('\n')
1156 for current_line
in text_lines
:
1157 # Parse the output from the performance test for the metric we're
1159 metric_re
= metric_formatted
+\
1160 "(\s)*(?P<values>[0-9]+(\.[0-9]*)?)"
1161 metric_re
= re
.compile(metric_re
)
1162 regex_results
= metric_re
.search(current_line
)
1164 if not regex_results
is None:
1165 values_list
+= [regex_results
.group('values')]
1167 metric_re
= metric_formatted
+\
1168 "(\s)*\[(\s)*(?P<values>[0-9,.]+)\]"
1169 metric_re
= re
.compile(metric_re
)
1170 regex_results
= metric_re
.search(current_line
)
1172 if not regex_results
is None:
1173 metric_values
= regex_results
.group('values')
1175 values_list
+= metric_values
.split(',')
1177 values_list
= [float(v
) for v
in values_list
if IsStringFloat(v
)]
1179 # If the metric is times/t, we need to sum the timings in order to get
1180 # similar regression results as the try-bots.
1181 metrics_to_sum
= [['times', 't'], ['times', 'page_load_time'],
1182 ['cold_times', 'page_load_time'], ['warm_times', 'page_load_time']]
1184 if metric
in metrics_to_sum
:
1186 values_list
= [reduce(lambda x
, y
: float(x
) + float(y
), values_list
)]
1190 def ParseMetricValuesFromOutput(self
, metric
, text
):
1191 """Parses output from performance_ui_tests and retrieves the results for
1195 metric: The metric as a list of [<trace>, <value>] strings.
1196 text: The text to parse the metric values from.
1199 A list of floating point numbers found.
1201 metric_values
= self
.TryParseResultValuesFromOutput(metric
, text
)
1203 if not metric_values
:
1204 metric_values
= self
.TryParseHistogramValuesFromOutput(metric
, text
)
1206 return metric_values
1208 def _GenerateProfileIfNecessary(self
, command_args
):
1209 """Checks the command line of the performance test for dependencies on
1210 profile generation, and runs tools/perf/generate_profile as necessary.
1213 command_args: Command line being passed to performance test, as a list.
1216 False if profile generation was necessary and failed, otherwise True.
1219 if '--profile-dir' in ' '.join(command_args
):
1220 # If we were using python 2.7+, we could just use the argparse
1221 # module's parse_known_args to grab --profile-dir. Since some of the
1222 # bots still run 2.6, have to grab the arguments manually.
1224 args_to_parse
= ['--profile-dir', '--browser']
1226 for arg_to_parse
in args_to_parse
:
1227 for i
, current_arg
in enumerate(command_args
):
1228 if arg_to_parse
in current_arg
:
1229 current_arg_split
= current_arg
.split('=')
1231 # Check 2 cases, --arg=<val> and --arg <val>
1232 if len(current_arg_split
) == 2:
1233 arg_dict
[arg_to_parse
] = current_arg_split
[1]
1234 elif i
+ 1 < len(command_args
):
1235 arg_dict
[arg_to_parse
] = command_args
[i
+1]
1237 path_to_generate
= os
.path
.join('tools', 'perf', 'generate_profile')
1239 if arg_dict
.has_key('--profile-dir') and arg_dict
.has_key('--browser'):
1240 profile_path
, profile_type
= os
.path
.split(arg_dict
['--profile-dir'])
1241 return not RunProcess(['python', path_to_generate
,
1242 '--profile-type-to-generate', profile_type
,
1243 '--browser', arg_dict
['--browser'], '--output-dir', profile_path
])
1247 def RunPerformanceTestAndParseResults(self
, command_to_run
, metric
,
1248 reset_on_first_run
=False, upload_on_last_run
=False, results_label
=None):
1249 """Runs a performance test on the current revision by executing the
1250 'command_to_run' and parses the results.
1253 command_to_run: The command to be run to execute the performance test.
1254 metric: The metric to parse out from the results of the performance test.
1257 On success, it will return a tuple of the average value of the metric,
1258 and a success code of 0.
1261 if self
.opts
.debug_ignore_perf_test
:
1262 return ({'mean': 0.0, 'std_err': 0.0, 'std_dev': 0.0, 'values': [0.0]}, 0)
1265 command_to_run
= command_to_run
.replace('/', r
'\\')
1267 args
= shlex
.split(command_to_run
)
1269 if not self
._GenerateProfileIfNecessary
(args
):
1270 return ('Failed to generate profile for performance test.', -1)
1272 # If running a telemetry test for cros, insert the remote ip, and
1273 # identity parameters.
1274 is_telemetry
= bisect_utils
.IsTelemetryCommand(command_to_run
)
1275 if self
.opts
.target_platform
== 'cros' and is_telemetry
:
1276 args
.append('--remote=%s' % self
.opts
.cros_remote_ip
)
1277 args
.append('--identity=%s' % CROS_TEST_KEY_PATH
)
1280 os
.chdir(self
.src_cwd
)
1282 start_time
= time
.time()
1285 output_of_all_runs
= ''
1286 for i
in xrange(self
.opts
.repeat_test_count
):
1287 # Can ignore the return code since if the tests fail, it won't return 0.
1289 current_args
= copy
.copy(args
)
1291 if i
== 0 and reset_on_first_run
:
1292 current_args
.append('--reset-results')
1293 elif i
== self
.opts
.repeat_test_count
- 1 and upload_on_last_run
:
1294 current_args
.append('--upload-results')
1296 current_args
.append('--results-label=%s' % results_label
)
1297 (output
, return_code
) = RunProcessAndRetrieveOutput(current_args
)
1299 if e
.errno
== errno
.ENOENT
:
1300 err_text
= ("Something went wrong running the performance test. "
1301 "Please review the command line:\n\n")
1302 if 'src/' in ' '.join(args
):
1303 err_text
+= ("Check that you haven't accidentally specified a path "
1304 "with src/ in the command.\n\n")
1305 err_text
+= ' '.join(args
)
1308 return (err_text
, -1)
1311 output_of_all_runs
+= output
1312 if self
.opts
.output_buildbot_annotations
:
1315 metric_values
+= self
.ParseMetricValuesFromOutput(metric
, output
)
1317 elapsed_minutes
= (time
.time() - start_time
) / 60.0
1319 if elapsed_minutes
>= self
.opts
.max_time_minutes
or not metric_values
:
1324 # Need to get the average value if there were multiple values.
1326 truncated_mean
= CalculateTruncatedMean(metric_values
,
1327 self
.opts
.truncate_percent
)
1328 standard_err
= CalculateStandardError(metric_values
)
1329 standard_dev
= CalculateStandardDeviation(metric_values
)
1332 'mean': truncated_mean
,
1333 'std_err': standard_err
,
1334 'std_dev': standard_dev
,
1335 'values': metric_values
,
1338 print 'Results of performance test: %12f %12f' % (
1339 truncated_mean
, standard_err
)
1341 return (values
, 0, output_of_all_runs
)
1343 return ('Invalid metric specified, or no values returned from '
1344 'performance test.', -1, output_of_all_runs
)
def FindAllRevisionsToSync(self, revision, depot):
  """Finds all dependant revisions and depots that need to be synced for a
  given revision. This is only useful in the git workflow, as an svn depot
  may be split into multiple mirrors.

  ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
  skia/include. To sync skia/src properly, one has to find the proper
  revisions in skia/gyp and skia/include.

  Args:
    revision: The revision to sync to.
    depot: The depot in use at the moment (probably skia).

  Returns:
    A list of [depot, revision] pairs that need to be synced.
  """
  revisions_to_sync = [[depot, revision]]

  is_base = (depot == 'chromium') or (depot == 'cros')

  # Some SVN depots were split into multiple git depots, so we need to
  # figure out for each mirror which git revision to grab. There's no
  # guarantee that the SVN revision will exist for each of the dependant
  # depots, so we have to grep the git logs and grab the next earlier one.
  if not is_base and\
     DEPOT_DEPS_NAME[depot]['depends'] and\
     self.source_control.IsGit():
    svn_rev = self.source_control.SVNFindRev(revision)

    for d in DEPOT_DEPS_NAME[depot]['depends']:
      self.ChangeToDepotWorkingDirectory(d)

      dependant_rev = self.source_control.ResolveToRevision(svn_rev, d, -1000)

      if dependant_rev:
        revisions_to_sync.append([d, dependant_rev])

    num_resolved = len(revisions_to_sync)
    num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])

    self.ChangeToDepotWorkingDirectory(depot)

    # Fail if any dependant depot failed to resolve (the initial
    # [depot, revision] entry accounts for the - 1).
    if not ((num_resolved - 1) == num_needed):
      return None

  return revisions_to_sync
1393 def PerformPreBuildCleanup(self
):
1394 """Performs necessary cleanup between runs."""
1395 print 'Cleaning up between runs.'
1398 # Having these pyc files around between runs can confuse the
1399 # perf tests and cause them to crash.
1400 for (path
, dir, files
) in os
.walk(self
.src_cwd
):
1401 for cur_file
in files
:
1402 if cur_file
.endswith('.pyc'):
1403 path_to_file
= os
.path
.join(path
, cur_file
)
1404 os
.remove(path_to_file
)
def PerformWebkitDirectoryCleanup(self, revision):
  """If the script is switching between Blink and WebKit during bisect,
  its faster to just delete the directory rather than leave it up to git
  to sync.

  Returns:
    True if successful.
  """
  if not self.source_control.CheckoutFileAtRevision(
      bisect_utils.FILE_DEPS_GIT, revision):
    return False

  cwd = os.getcwd()
  os.chdir(self.src_cwd)

  is_blink = bisect_utils.IsDepsFileBlink()

  os.chdir(cwd)

  # Put the checked-out DEPS file back the way it was before deciding
  # whether the webkit directory has to go.
  if not self.source_control.RevertFileToHead(
      bisect_utils.FILE_DEPS_GIT):
    return False

  if self.was_blink != is_blink:
    self.was_blink = is_blink
    return bisect_utils.RemoveThirdPartyWebkitDirectory()
  return True
def PerformCrosChrootCleanup(self):
  """Deletes the chroot.

  Returns:
    True if successful.
  """
  cwd = os.getcwd()
  self.ChangeToDepotWorkingDirectory('cros')
  cmd = [CROS_SDK_PATH, '--delete']
  return_code = RunProcess(cmd)
  os.chdir(cwd)
  # RunProcess returns the exit code; 0 means success.
  return not return_code
def CreateCrosChroot(self):
  """Creates a new chroot.

  Returns:
    True if successful.
  """
  cwd = os.getcwd()
  self.ChangeToDepotWorkingDirectory('cros')
  cmd = [CROS_SDK_PATH, '--create']
  return_code = RunProcess(cmd)
  os.chdir(cwd)
  # RunProcess returns the exit code; 0 means success.
  return not return_code
def PerformPreSyncCleanup(self, revision, depot):
  """Performs any necessary cleanup before syncing.

  Returns:
    True if successful.
  """
  if depot == 'chromium':
    if not bisect_utils.RemoveThirdPartyLibjingleDirectory():
      return False
    return self.PerformWebkitDirectoryCleanup(revision)
  elif depot == 'cros':
    return self.PerformCrosChrootCleanup()
  # No cleanup needed for other depots.
  return True
def RunPostSync(self, depot):
  """Performs any work after syncing.

  Returns:
    True if successful.
  """
  if self.opts.target_platform == 'android':
    if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
        path_to_src=self.src_cwd):
      return False

  if depot == 'cros':
    return self.CreateCrosChroot()
  else:
    return self.RunGClientHooks()
def ShouldSkipRevision(self, depot, revision):
  """Some commits can be safely skipped (such as a DEPS roll), since the tool
  is git based those changes would have no effect.

  Args:
    depot: The depot being bisected.
    revision: Current revision we're synced to.

  Returns:
    True if we should skip building/testing this revision.
  """
  if depot == 'chromium':
    if self.source_control.IsGit():
      cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
      output = CheckRunGit(cmd)

      files = output.splitlines()

      # A commit that touches only the DEPS file is a roll; building it
      # in a git checkout would produce no functional change.
      if len(files) == 1 and files[0] == 'DEPS':
        return True

  return False
def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
    skippable=False):
  """Performs a full sync/build/run of the specified revision.

  Args:
    revision: The revision to sync to.
    depot: The depot that's being used at the moment (src, webkit, etc.)
    command_to_run: The command to execute the performance test.
    metric: The performance metric being tested.
    skippable: If True, DEPS-roll-only revisions may be skipped.

  Returns:
    On success, a tuple containing the results of the performance test.
    Otherwise, a tuple with the error message.
  """
  sync_client = None
  if depot == 'chromium':
    sync_client = 'gclient'
  elif depot == 'cros':
    sync_client = 'repo'

  revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)

  if not revisions_to_sync:
    return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)

  if not self.PerformPreSyncCleanup(revision, depot):
    return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)

  success = True
  if not self.opts.debug_ignore_sync:
    for r in revisions_to_sync:
      self.ChangeToDepotWorkingDirectory(r[0])

      if sync_client:
        self.PerformPreBuildCleanup()

      if not self.source_control.SyncToRevision(r[1], sync_client):
        success = False

        break

  if success:
    success = self.RunPostSync(depot)

    if success:
      if skippable and self.ShouldSkipRevision(depot, revision):
        return ('Skipped revision: [%s]' % str(revision),
            BUILD_RESULT_SKIPPED)

      start_build_time = time.time()
      if self.BuildCurrentRevision(depot):
        after_build_time = time.time()
        results = self.RunPerformanceTestAndParseResults(command_to_run,
            metric)

        if results[1] == 0:
          external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision(
              depot, revision)

          if not external_revisions is None:
            # Append perf and build timings so callers can report step time.
            return (results[0], results[1], external_revisions,
                time.time() - after_build_time, time.time() -
                start_build_time)
          else:
            return ('Failed to parse DEPS file for external revisions.',
                BUILD_RESULT_FAIL)
        else:
          return results
      else:
        return ('Failed to build revision: [%s]' % (str(revision, )),
            BUILD_RESULT_FAIL)
    else:
      return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
  else:
    return ('Failed to sync revision: [%s]' % (str(revision, )),
        BUILD_RESULT_FAIL)
def CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
  """Given known good and bad values, decide if the current_value passed
  or failed.

  Args:
    current_value: The value of the metric being checked.
    known_bad_value: The reference value for a "failed" run.
    known_good_value: The reference value for a "passed" run.

  Returns:
    True if the current_value is closer to the known_good_value than the
    known_bad_value.
  """
  dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
  dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])

  return dist_to_good_value < dist_to_bad_value
def ChangeToDepotWorkingDirectory(self, depot_name):
  """Given a depot, changes to the appropriate working directory.

  Args:
    depot_name: The name of the depot (see DEPOT_NAMES).
  """
  if depot_name == 'chromium':
    os.chdir(self.src_cwd)
  elif depot_name == 'cros':
    os.chdir(self.cros_cwd)
  elif depot_name in DEPOT_NAMES:
    os.chdir(self.depot_cwd[depot_name])
  else:
    assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
                  ' was added without proper support?' %\
                  (depot_name,)
def FindNextDepotToBisect(self, current_revision, min_revision_data,
    max_revision_data):
  """Given the state of the bisect, decides which depot the script should
  dive into next (if any).

  Args:
    current_revision: Current revision synced to.
    min_revision_data: Data about the earliest revision in the bisect range.
    max_revision_data: Data about the latest revision in the bisect range.

  Returns:
    The depot to bisect next, or None.
  """
  external_depot = None
  for current_depot in DEPOT_NAMES:
    # Skip depots pinned to a different platform.
    if 'platform' in DEPOT_DEPS_NAME[current_depot]:
      if DEPOT_DEPS_NAME[current_depot]['platform'] != os.name:
        continue

    # Only consider depots that are marked for recursion and hang off the
    # depot we're currently bisecting.
    if not (DEPOT_DEPS_NAME[current_depot]["recurse"] and
        DEPOT_DEPS_NAME[current_depot]['from'] ==
        min_revision_data['depot']):
      continue

    # No change in this depot across the range: nothing to bisect there.
    if (min_revision_data['external'][current_depot] ==
        max_revision_data['external'][current_depot]):
      continue

    if (min_revision_data['external'][current_depot] and
        max_revision_data['external'][current_depot]):
      external_depot = current_depot
      break

  return external_depot
def PrepareToBisectOnDepot(self,
                           current_depot,
                           end_revision,
                           start_revision,
                           previous_depot,
                           previous_revision):
  """Changes to the appropriate directory and gathers a list of revisions
  to bisect between |start_revision| and |end_revision|.

  Args:
    current_depot: The depot we want to bisect.
    end_revision: End of the revision range.
    start_revision: Start of the revision range.
    previous_depot: The depot we were previously bisecting.
    previous_revision: The last revision we synced to on |previous_depot|.

  Returns:
    A list containing the revisions between |start_revision| and
    |end_revision| inclusive.
  """
  # Change into working directory of external library to run
  # subsequent commands.
  self.ChangeToDepotWorkingDirectory(current_depot)

  # V8 (and possibly others) is merged in periodically. Bisecting
  # this directory directly won't give much good info.
  if 'custom_deps' in DEPOT_DEPS_NAME[current_depot]:
    config_path = os.path.join(self.src_cwd, '..')
    if bisect_utils.RunGClientAndCreateConfig(self.opts,
        DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
      return []
    if bisect_utils.RunGClient(
        ['sync', '--revision', previous_revision], cwd=self.src_cwd):
      return []

  if current_depot == 'v8_bleeding_edge':
    self.ChangeToDepotWorkingDirectory('chromium')

    # Swap the bleeding_edge checkout into v8's place so the build picks it
    # up; record the cleanup commands needed to undo the swap afterwards.
    shutil.move('v8', 'v8.bak')
    shutil.move('v8_bleeding_edge', 'v8')

    self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
    self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])

    self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
    self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')

    self.ChangeToDepotWorkingDirectory(current_depot)

  depot_revision_list = self.GetRevisionList(current_depot,
                                             end_revision,
                                             start_revision)

  self.ChangeToDepotWorkingDirectory('chromium')

  return depot_revision_list
def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
  """Gathers reference values by running the performance tests on the
  known good and bad revisions.

  Args:
    good_rev: The last known good revision where the performance regression
      has not occurred yet.
    bad_rev: A revision where the performance regression has already occurred.
    cmd: The command to execute the performance test.
    metric: The metric being tested for regression.

  Returns:
    A tuple with the results of building and running each revision.
  """
  bad_run_results = self.SyncBuildAndRunRevision(bad_rev,
                                                 target_depot,
                                                 cmd,
                                                 metric)

  good_run_results = None

  # Only bother with the good revision if the bad one succeeded; index 1
  # of the result tuple is the error code (0 on success).
  if not bad_run_results[1]:
    good_run_results = self.SyncBuildAndRunRevision(good_rev,
                                                    target_depot,
                                                    cmd,
                                                    metric)

  return (bad_run_results, good_run_results)
def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
  """Adds new revisions to the revision_data dict and initializes them.

  Args:
    revisions: List of revisions to add.
    depot: Depot that's currently in use (src, webkit, etc...)
    sort: Sorting key for displaying revisions.
    revision_data: A dict to add the new revisions into. Existing revisions
      will have their sort keys offset.
  """
  num_depot_revisions = len(revisions)

  # Shift every existing entry past the insertion point to make room for
  # the new revisions. (items() instead of py2-only iteritems(); values are
  # mutated in place, the dict structure is unchanged.)
  for k, v in revision_data.items():
    if v['sort'] > sort:
      v['sort'] += num_depot_revisions

  for i, r in enumerate(revisions):
    revision_data[r] = {'revision' : r,
                        'depot' : depot,
                        'value' : None,
                        'perf_time' : 0,
                        'build_time' : 0,
                        'passed' : '?',
                        'sort' : i + sort + 1}
1776 def PrintRevisionsToBisectMessage(self
, revision_list
, depot
):
1777 if self
.opts
.output_buildbot_annotations
:
1778 step_name
= 'Bisection Range: [%s - %s]' % (
1779 revision_list
[len(revision_list
)-1], revision_list
[0])
1780 bisect_utils
.OutputAnnotationStepStart(step_name
)
1783 print 'Revisions to bisect on [%s]:' % depot
1784 for revision_id
in revision_list
:
1785 print ' -> %s' % (revision_id
, )
1788 if self
.opts
.output_buildbot_annotations
:
1789 bisect_utils
.OutputAnnotationStepClosed()
def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
  """Checks to see if changes to DEPS file occurred, and that the revision
  range also includes the change to .DEPS.git. If it doesn't, attempts to
  expand the revision range to include it.

  Args:
    bad_revision: First known bad revision.
    good_revision: Last known good revision.

  Returns:
    A tuple with the new bad and good revisions.
  """
  if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
    changes_to_deps = self.source_control.QueryFileRevisionHistory(
        'DEPS', good_revision, bad_revision)

    if changes_to_deps:
      # DEPS file was changed, search from the oldest change to DEPS file to
      # bad_revision to see if there are matching .DEPS.git changes.
      oldest_deps_change = changes_to_deps[-1]
      changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
          bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)

      if len(changes_to_deps) != len(changes_to_gitdeps):
        # Grab the timestamp of the last DEPS change
        cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
        output = CheckRunGit(cmd)
        commit_time = int(output)

        # Try looking for a commit that touches the .DEPS.git file in the
        # next 15 minutes after the DEPS file change.
        cmd = ['log', '--format=%H', '-1',
            '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
            'origin/master', bisect_utils.FILE_DEPS_GIT]
        output = CheckRunGit(cmd)
        output = output.strip()
        if output:
          self.warnings.append('Detected change to DEPS and modified '
              'revision range to include change to .DEPS.git')
          return (output, good_revision)
        else:
          self.warnings.append('Detected change to DEPS but couldn\'t find '
              'matching change to .DEPS.git')
  return (bad_revision, good_revision)
def CheckIfRevisionsInProperOrder(self,
                                  target_depot,
                                  good_revision,
                                  bad_revision):
  """Checks that |good_revision| is an earlier revision than |bad_revision|.

  Args:
    good_revision: Number/tag of the known good revision.
    bad_revision: Number/tag of the known bad revision.

  Returns:
    True if the revisions are in the proper order (good earlier than bad).
  """
  if self.source_control.IsGit() and target_depot != 'cros':
    # Git hashes aren't ordered, so compare commit timestamps instead.
    cmd = ['log', '--format=%ct', '-1', good_revision]
    output = CheckRunGit(cmd)
    good_commit_time = int(output)

    cmd = ['log', '--format=%ct', '-1', bad_revision]
    output = CheckRunGit(cmd)
    bad_commit_time = int(output)

    return good_commit_time <= bad_commit_time
  else:
    # Cros/svn use integers
    return int(good_revision) <= int(bad_revision)
1863 def Run(self
, command_to_run
, bad_revision_in
, good_revision_in
, metric
):
1864 """Given known good and bad revisions, run a binary search on all
1865 intermediate revisions to determine the CL where the performance regression
1869 command_to_run: Specify the command to execute the performance test.
1870 good_revision: Number/tag of the known good revision.
1871 bad_revision: Number/tag of the known bad revision.
1872 metric: The performance metric to monitor.
1875 A dict with 2 members, 'revision_data' and 'error'. On success,
1876 'revision_data' will contain a dict mapping revision ids to
1877 data about that revision. Each piece of revision data consists of a
1878 dict with the following keys:
1880 'passed': Represents whether the performance test was successful at
1881 that revision. Possible values include: 1 (passed), 0 (failed),
1882 '?' (skipped), 'F' (build failed).
1883 'depot': The depot that this revision is from (ie. WebKit)
1884 'external': If the revision is a 'src' revision, 'external' contains
1885 the revisions of each of the external libraries.
1886 'sort': A sort value for sorting the dict in order of commits.
1903 If an error occurred, the 'error' field will contain the message and
1904 'revision_data' will be empty.
1907 results
= {'revision_data' : {},
1910 # Choose depot to bisect first
1911 target_depot
= 'chromium'
1912 if self
.opts
.target_platform
== 'cros':
1913 target_depot
= 'cros'
1916 self
.ChangeToDepotWorkingDirectory(target_depot
)
1918 # If they passed SVN CL's, etc... we can try match them to git SHA1's.
1919 bad_revision
= self
.source_control
.ResolveToRevision(bad_revision_in
,
1921 good_revision
= self
.source_control
.ResolveToRevision(good_revision_in
,
1927 if bad_revision
is None:
1928 results
['error'] = 'Could\'t resolve [%s] to SHA1.' % (bad_revision_in
,)
1931 if good_revision
is None:
1932 results
['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in
,)
1935 # Check that they didn't accidentally swap good and bad revisions.
1936 if not self
.CheckIfRevisionsInProperOrder(
1937 target_depot
, good_revision
, bad_revision
):
1938 results
['error'] = 'bad_revision < good_revision, did you swap these '\
1942 (bad_revision
, good_revision
) = self
.NudgeRevisionsIfDEPSChange(
1943 bad_revision
, good_revision
)
1945 if self
.opts
.output_buildbot_annotations
:
1946 bisect_utils
.OutputAnnotationStepStart('Gathering Revisions')
1948 print 'Gathering revision range for bisection.'
1950 # Retrieve a list of revisions to do bisection on.
1951 src_revision_list
= self
.GetRevisionList(target_depot
,
1955 if self
.opts
.output_buildbot_annotations
:
1956 bisect_utils
.OutputAnnotationStepClosed()
1958 if src_revision_list
:
1959 # revision_data will store information about a revision such as the
1960 # depot it came from, the webkit/V8 revision at that time,
1961 # performance timing, build state, etc...
1962 revision_data
= results
['revision_data']
1964 # revision_list is the list we're binary searching through at the moment.
1969 for current_revision_id
in src_revision_list
:
1972 revision_data
[current_revision_id
] = {'value' : None,
1974 'depot' : target_depot
,
1978 'sort' : sort_key_ids
}
1979 revision_list
.append(current_revision_id
)
1982 max_revision
= len(revision_list
) - 1
1984 self
.PrintRevisionsToBisectMessage(revision_list
, target_depot
)
1986 if self
.opts
.output_buildbot_annotations
:
1987 bisect_utils
.OutputAnnotationStepStart('Gathering Reference Values')
1989 print 'Gathering reference values for bisection.'
1991 # Perform the performance tests on the good and bad revisions, to get
1993 (bad_results
, good_results
) = self
.GatherReferenceValues(good_revision
,
1999 if self
.opts
.output_buildbot_annotations
:
2000 bisect_utils
.OutputAnnotationStepClosed()
2003 results
['error'] = ('An error occurred while building and running '
2004 'the \'bad\' reference value. The bisect cannot continue without '
2005 'a working \'bad\' revision to start from.\n\nError: %s' %
2010 results
['error'] = ('An error occurred while building and running '
2011 'the \'good\' reference value. The bisect cannot continue without '
2012 'a working \'good\' revision to start from.\n\nError: %s' %
2017 # We need these reference values to determine if later runs should be
2018 # classified as pass or fail.
2019 known_bad_value
= bad_results
[0]
2020 known_good_value
= good_results
[0]
2022 # Can just mark the good and bad revisions explicitly here since we
2023 # already know the results.
2024 bad_revision_data
= revision_data
[revision_list
[0]]
2025 bad_revision_data
['external'] = bad_results
[2]
2026 bad_revision_data
['perf_time'] = bad_results
[3]
2027 bad_revision_data
['build_time'] = bad_results
[4]
2028 bad_revision_data
['passed'] = False
2029 bad_revision_data
['value'] = known_bad_value
2031 good_revision_data
= revision_data
[revision_list
[max_revision
]]
2032 good_revision_data
['external'] = good_results
[2]
2033 good_revision_data
['perf_time'] = good_results
[3]
2034 good_revision_data
['build_time'] = good_results
[4]
2035 good_revision_data
['passed'] = True
2036 good_revision_data
['value'] = known_good_value
2038 next_revision_depot
= target_depot
2041 if not revision_list
:
2044 min_revision_data
= revision_data
[revision_list
[min_revision
]]
2045 max_revision_data
= revision_data
[revision_list
[max_revision
]]
2047 if max_revision
- min_revision
<= 1:
2048 current_depot
= min_revision_data
['depot']
2049 if min_revision_data
['passed'] == '?':
2050 next_revision_index
= min_revision
2051 elif max_revision_data
['passed'] == '?':
2052 next_revision_index
= max_revision
2053 elif current_depot
in ['cros', 'chromium', 'v8']:
2054 previous_revision
= revision_list
[min_revision
]
2055 # If there were changes to any of the external libraries we track,
2056 # should bisect the changes there as well.
2057 external_depot
= self
.FindNextDepotToBisect(
2058 previous_revision
, min_revision_data
, max_revision_data
)
2060 # If there was no change in any of the external depots, the search
2062 if not external_depot
:
2063 if current_depot
== 'v8':
2064 self
.warnings
.append('Unfortunately, V8 bisection couldn\'t '
2065 'continue any further. The script can only bisect into '
2066 'V8\'s bleeding_edge repository if both the current and '
2067 'previous revisions in trunk map directly to revisions in '
2071 earliest_revision
= max_revision_data
['external'][external_depot
]
2072 latest_revision
= min_revision_data
['external'][external_depot
]
2074 new_revision_list
= self
.PrepareToBisectOnDepot(external_depot
,
2077 next_revision_depot
,
2080 if not new_revision_list
:
2081 results
['error'] = 'An error occurred attempting to retrieve'\
2082 ' revision range: [%s..%s]' %\
2083 (earliest_revision
, latest_revision
)
2086 self
.AddRevisionsIntoRevisionData(new_revision_list
,
2088 min_revision_data
['sort'],
2091 # Reset the bisection and perform it on the newly inserted
2093 revision_list
= new_revision_list
2095 max_revision
= len(revision_list
) - 1
2096 sort_key_ids
+= len(revision_list
)
2098 print 'Regression in metric:%s appears to be the result of changes'\
2099 ' in [%s].' % (metric
, external_depot
)
2101 self
.PrintRevisionsToBisectMessage(revision_list
, external_depot
)
2107 next_revision_index
= int((max_revision
- min_revision
) / 2) +\
2110 next_revision_id
= revision_list
[next_revision_index
]
2111 next_revision_data
= revision_data
[next_revision_id
]
2112 next_revision_depot
= next_revision_data
['depot']
2114 self
.ChangeToDepotWorkingDirectory(next_revision_depot
)
2116 if self
.opts
.output_buildbot_annotations
:
2117 step_name
= 'Working on [%s]' % next_revision_id
2118 bisect_utils
.OutputAnnotationStepStart(step_name
)
2120 print 'Working on revision: [%s]' % next_revision_id
2122 run_results
= self
.SyncBuildAndRunRevision(next_revision_id
,
2123 next_revision_depot
,
2125 metric
, skippable
=True)
2127 # If the build is successful, check whether or not the metric
2129 if not run_results
[1]:
2130 if len(run_results
) > 2:
2131 next_revision_data
['external'] = run_results
[2]
2132 next_revision_data
['perf_time'] = run_results
[3]
2133 next_revision_data
['build_time'] = run_results
[4]
2135 passed_regression
= self
.CheckIfRunPassed(run_results
[0],
2139 next_revision_data
['passed'] = passed_regression
2140 next_revision_data
['value'] = run_results
[0]
2142 if passed_regression
:
2143 max_revision
= next_revision_index
2145 min_revision
= next_revision_index
2147 if run_results
[1] == BUILD_RESULT_SKIPPED
:
2148 next_revision_data
['passed'] = 'Skipped'
2149 elif run_results
[1] == BUILD_RESULT_FAIL
:
2150 next_revision_data
['passed'] = 'Build Failed'
2152 print run_results
[0]
2154 # If the build is broken, remove it and redo search.
2155 revision_list
.pop(next_revision_index
)
2159 if self
.opts
.output_buildbot_annotations
:
2160 bisect_utils
.OutputAnnotationStepClosed()
2162 # Weren't able to sync and retrieve the revision range.
2163 results
['error'] = 'An error occurred attempting to retrieve revision '\
2164 'range: [%s..%s]' % (good_revision
, bad_revision
)
2168 def _PrintBanner(self
, results_dict
):
2170 print " __o_\___ Aw Snap! We hit a speed bump!"
2171 print "=-O----O-'__.~.___________________________________"
2173 print 'Bisect reproduced a %.02f%% (+-%.02f%%) change in the %s metric.' % (
2174 results_dict
['regression_size'], results_dict
['regression_std_err'],
2175 '/'.join(self
.opts
.metric
))
2176 # The perf dashboard specifically looks for the string
2177 # "Confidence in Bisection Results: 100%" to decide whether or not
2178 # to cc the author(s). If you change this, please update the perf
2179 # dashboard as well.
2180 print 'Confidence in Bisection Results: %d%%' % results_dict
['confidence']
2182 def _PrintRevisionInfo(self
, cl
, info
, depot
=None):
2183 # The perf dashboard specifically looks for the string
2184 # "Author : " to parse out who to cc on a bug. If you change the
2185 # formatting here, please update the perf dashboard as well.
2187 print 'Subject : %s' % info
['subject']
2188 print 'Author : %s' % info
['author']
2189 if not info
['email'].startswith(info
['author']):
2190 print 'Email : %s' % info
['email']
2191 if depot
and DEPOT_DEPS_NAME
[depot
].has_key('viewvc'):
2193 # Format is "git-svn-id: svn://....@123456 <other data>"
2194 svn_line
= [i
for i
in info
['body'].splitlines() if 'git-svn-id:' in i
]
2195 svn_revision
= svn_line
[0].split('@')
2196 svn_revision
= svn_revision
[1].split(' ')[0]
2197 print 'Link : %s' % DEPOT_DEPS_NAME
[depot
]['viewvc'] + svn_revision
2200 print 'Failed to parse svn revision from body:'
2204 print 'Commit : %s' % cl
2205 print 'Date : %s' % info
['date']
2207 def _PrintTestedCommitsTable(self
, revision_data_sorted
,
2208 first_working_revision
, last_broken_revision
):
2210 print 'Tested commits:'
2211 print ' %20s %40s %12s %14s %13s' % ('Depot'.center(20, ' '),
2212 'Commit SHA'.center(40, ' '), 'Mean'.center(12, ' '),
2213 'Std. Error'.center(14, ' '), 'State'.center(13, ' '))
2215 for current_id
, current_data
in revision_data_sorted
:
2216 if current_data
['value']:
2217 if (current_id
== last_broken_revision
or
2218 current_id
== first_working_revision
):
2224 state_str
= 'Suspected CL'
2227 state_str
= state_str
.center(13, ' ')
2229 std_error
= ('+-%.02f' %
2230 current_data
['value']['std_err']).center(14, ' ')
2231 mean
= ('%.02f' % current_data
['value']['mean']).center(12, ' ')
2232 print ' %20s %40s %12s %14s %13s' % (
2233 current_data
['depot'].center(20, ' '), current_id
, mean
,
2234 std_error
, state_str
)
2236 def _PrintReproSteps(self
):
2238 print 'To reproduce locally:'
2239 print '$ ' + self
.opts
.command
2240 if bisect_utils
.IsTelemetryCommand(self
.opts
.command
):
2242 print 'Also consider passing --profiler=list to see available profilers.'
2244 def _PrintOtherRegressions(self
, other_regressions
, revision_data
):
2246 print 'Other regressions may have occurred:'
2247 for regression
in other_regressions
:
2248 current_id
, previous_id
, percent_change
, deviations
= regression
2249 current_data
= revision_data
[current_id
]
2250 previous_data
= revision_data
[previous_id
]
2252 if deviations
is None:
2255 deviations
= '%.2f' % deviations
2257 if percent_change
is None:
2260 print ' %8s %s [%.2f%%, %s x std.dev]' % (
2261 previous_data
['depot'], previous_id
, 100 * percent_change
, deviations
)
2262 print ' %8s %s' % (current_data
['depot'], current_id
)
2265 def _PrintStepTime(self
, revision_data_sorted
):
2266 step_perf_time_avg
= 0.0
2267 step_build_time_avg
= 0.0
2269 for _
, current_data
in revision_data_sorted
:
2270 step_perf_time_avg
+= current_data
['perf_time']
2271 step_build_time_avg
+= current_data
['build_time']
2274 step_perf_time_avg
= step_perf_time_avg
/ step_count
2275 step_build_time_avg
= step_build_time_avg
/ step_count
2277 print 'Average build time : %s' % datetime
.timedelta(
2278 seconds
=int(step_build_time_avg
))
2279 print 'Average test time : %s' % datetime
.timedelta(
2280 seconds
=int(step_perf_time_avg
))
2282 def _PrintWarnings(self
):
2283 if not self
.warnings
:
2287 for w
in self
.warnings
:
2290 def _GetResultsDict(self
, revision_data
, revision_data_sorted
):
2291 # Find range where it possibly broke.
2292 first_working_revision
= None
2293 first_working_revision_index
= -1
2294 last_broken_revision
= None
2295 last_broken_revision_index
= -1
2297 for i
in xrange(len(revision_data_sorted
)):
2298 k
, v
= revision_data_sorted
[i
]
2299 if v
['passed'] == 1:
2300 if not first_working_revision
:
2301 first_working_revision
= k
2302 first_working_revision_index
= i
2305 last_broken_revision
= k
2306 last_broken_revision_index
= i
2308 if last_broken_revision
!= None and first_working_revision
!= None:
2309 bounds_broken
= [revision_data
[last_broken_revision
]['value']['mean'],
2310 revision_data
[last_broken_revision
]['value']['mean']]
2312 for i
in xrange(0, last_broken_revision_index
+ 1):
2313 if revision_data_sorted
[i
][1]['value']:
2314 bounds_broken
[0] = min(bounds_broken
[0],
2315 revision_data_sorted
[i
][1]['value']['mean'])
2316 bounds_broken
[1] = max(bounds_broken
[1],
2317 revision_data_sorted
[i
][1]['value']['mean'])
2318 broken_mean
.extend(revision_data_sorted
[i
][1]['value']['values'])
2320 bounds_working
= [revision_data
[first_working_revision
]['value']['mean'],
2321 revision_data
[first_working_revision
]['value']['mean']]
2323 for i
in xrange(first_working_revision_index
, len(revision_data_sorted
)):
2324 if revision_data_sorted
[i
][1]['value']:
2325 bounds_working
[0] = min(bounds_working
[0],
2326 revision_data_sorted
[i
][1]['value']['mean'])
2327 bounds_working
[1] = max(bounds_working
[1],
2328 revision_data_sorted
[i
][1]['value']['mean'])
2329 working_mean
.extend(revision_data_sorted
[i
][1]['value']['values'])
2331 # Calculate the approximate size of the regression
2332 mean_of_bad_runs
= CalculateTruncatedMean(broken_mean
, 0.0)
2333 mean_of_good_runs
= CalculateTruncatedMean(working_mean
, 0.0)
2335 regression_size
= math
.fabs(max(mean_of_good_runs
, mean_of_bad_runs
) /
2336 max(0.0001, min(mean_of_good_runs
, mean_of_bad_runs
))) * 100.0 - 100.0
2338 regression_std_err
= math
.fabs(CalculatePooledStandardError(
2339 [working_mean
, broken_mean
]) /
2340 max(0.0001, min(mean_of_good_runs
, mean_of_bad_runs
))) * 100.0
2342 # Give a "confidence" in the bisect. At the moment we use how distinct the
2343 # values are before and after the last broken revision, and how noisy the
2345 dist_between_groups
= min(math
.fabs(bounds_broken
[1] - bounds_working
[0]),
2346 math
.fabs(bounds_broken
[0] - bounds_working
[1]))
2347 len_working_group
= CalculateStandardDeviation(working_mean
)
2348 len_broken_group
= CalculateStandardDeviation(broken_mean
)
2350 confidence
= (dist_between_groups
/ (
2351 max(0.0001, (len_broken_group
+ len_working_group
))))
2352 confidence
= int(min(1.0, max(confidence
, 0.0)) * 100.0)
2354 culprit_revisions
= []
2357 self
.ChangeToDepotWorkingDirectory(
2358 revision_data
[last_broken_revision
]['depot'])
2360 if revision_data
[last_broken_revision
]['depot'] == 'cros':
2361 # Want to get a list of all the commits and what depots they belong
2362 # to so that we can grab info about each.
2363 cmd
= ['repo', 'forall', '-c',
2364 'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
2365 last_broken_revision
, first_working_revision
+ 1)]
2366 (output
, return_code
) = RunProcessAndRetrieveOutput(cmd
)
2369 assert not return_code
, 'An error occurred while running'\
2370 ' "%s"' % ' '.join(cmd
)
2373 for l
in output
.split('\n'):
2375 # Output will be in form:
2377 # /path_to_other_depot
2385 contents
= l
.split(' ')
2386 if len(contents
) > 1:
2387 changes
.append([last_depot
, contents
[0]])
2390 info
= self
.source_control
.QueryRevisionInfo(c
[1])
2391 culprit_revisions
.append((c
[1], info
, None))
2393 for i
in xrange(last_broken_revision_index
, len(revision_data_sorted
)):
2394 k
, v
= revision_data_sorted
[i
]
2395 if k
== first_working_revision
:
2397 self
.ChangeToDepotWorkingDirectory(v
['depot'])
2398 info
= self
.source_control
.QueryRevisionInfo(k
)
2399 culprit_revisions
.append((k
, info
, v
['depot']))
2402 # Check for any other possible regression ranges
2403 good_std_dev
= revision_data
[first_working_revision
]['value']['std_err']
2404 good_mean
= revision_data
[first_working_revision
]['value']['mean']
2405 bad_mean
= revision_data
[last_broken_revision
]['value']['mean']
2406 prev_revision_data
= revision_data_sorted
[0][1]
2407 prev_revision_id
= revision_data_sorted
[0][0]
2408 other_regressions
= []
2409 for current_id
, current_data
in revision_data_sorted
:
2410 if current_data
['value']:
2411 prev_mean
= prev_revision_data
['value']['mean']
2412 cur_mean
= current_data
['value']['mean']
2415 deviations
= math
.fabs(prev_mean
- cur_mean
) / good_std_dev
2420 percent_change
= (prev_mean
- cur_mean
) / good_mean
2422 # If the "good" valuse are supposed to be higher than the "bad"
2423 # values (ie. scores), flip the sign of the percent change so that
2424 # a positive value always represents a regression.
2425 if bad_mean
< good_mean
:
2426 percent_change
*= -1.0
2428 percent_change
= None
2430 if deviations
>= 1.5 or percent_change
> 0.01:
2431 if current_id
!= first_working_revision
:
2432 other_regressions
.append(
2433 [current_id
, prev_revision_id
, percent_change
, deviations
])
2434 prev_revision_data
= current_data
2435 prev_revision_id
= current_id
2437 # Check for warnings:
2438 if len(culprit_revisions
) > 1:
2439 self
.warnings
.append('Due to build errors, regression range could '
2440 'not be narrowed down to a single commit.')
2441 if self
.opts
.repeat_test_count
== 1:
2442 self
.warnings
.append('Tests were only set to run once. This may '
2443 'be insufficient to get meaningful results.')
2444 if confidence
< 100:
2445 self
.warnings
.append(
2446 'Confidence is less than 100%. There could be other candidates for '
2447 'this regression. Try bisecting again with increased repeat_count or '
2448 'on a sub-metric that shows the regression more clearly.')
2451 'first_working_revision': first_working_revision
,
2452 'last_broken_revision': last_broken_revision
,
2453 'culprit_revisions': culprit_revisions
,
2454 'other_regressions': other_regressions
,
2455 'regression_size': regression_size
,
2456 'regression_std_err': regression_std_err
,
2457 'confidence': confidence
,
2460 def FormatAndPrintResults(self
, bisect_results
):
2461 """Prints the results from a bisection run in a readable format.
2464 bisect_results: The results from a bisection test run.
2466 revision_data
= bisect_results
['revision_data']
2467 revision_data_sorted
= sorted(revision_data
.iteritems(),
2468 key
= lambda x
: x
[1]['sort'])
2469 results_dict
= self
._GetResultsDict
(revision_data
, revision_data_sorted
)
2471 if self
.opts
.output_buildbot_annotations
:
2472 bisect_utils
.OutputAnnotationStepStart('Build Status Per Revision')
2475 print 'Full results of bisection:'
2476 for current_id
, current_data
in revision_data_sorted
:
2477 build_status
= current_data
['passed']
2479 if type(build_status
) is bool:
2481 build_status
= 'Good'
2483 build_status
= 'Bad'
2485 print ' %20s %40s %s' % (current_data
['depot'],
2486 current_id
, build_status
)
2489 if self
.opts
.output_buildbot_annotations
:
2490 bisect_utils
.OutputAnnotationStepClosed()
2491 # The perf dashboard scrapes the "results" step in order to comment on
2492 # bugs. If you change this, please update the perf dashboard as well.
2493 bisect_utils
.OutputAnnotationStepStart('Results')
2495 if results_dict
['culprit_revisions']:
2496 self
._PrintBanner
(results_dict
)
2497 for culprit
in results_dict
['culprit_revisions']:
2498 cl
, info
, depot
= culprit
2499 self
._PrintRevisionInfo
(cl
, info
, depot
)
2500 self
._PrintReproSteps
()
2501 if results_dict
['other_regressions']:
2502 self
._PrintOtherRegressions
(results_dict
['other_regressions'],
2505 self
._PrintTestedCommitsTable
(revision_data_sorted
,
2506 results_dict
['first_working_revision'],
2507 results_dict
['last_broken_revision'])
2508 self
._PrintStepTime
(revision_data_sorted
)
2509 self
._PrintWarnings
()
2511 if self
.opts
.output_buildbot_annotations
:
2512 bisect_utils
.OutputAnnotationStepClosed()
def DetermineAndCreateSourceControl(opts):
  """Attempts to determine the underlying source control workflow and returns
  a SourceControl object.

  Args:
    opts: The options parsed from the command line.

  Returns:
    An instance of a SourceControl object, or None if the current workflow
    is unsupported.
  """
  (output, return_code) = RunGit(['rev-parse', '--is-inside-work-tree'])

  if output.strip() == 'true':
    return GitSourceControl(opts)

  # Only the git workflow is recognized; anything else is unsupported.
  return None
def IsPlatformSupported(opts):
  """Checks that this platform and build system are supported.

  Args:
    opts: The options parsed from the command line.

  Returns:
    True if the platform and build system are supported.
  """
  # Haven't tested the script out on any other platforms yet.
  return os.name in ('posix', 'nt')
2546 def RmTreeAndMkDir(path_to_dir
):
2547 """Removes the directory tree specified, and then creates an empty
2548 directory in the same location.
2551 path_to_dir: Path to the directory tree.
2554 True if successful, False if an error occurred.
2557 if os
.path
.exists(path_to_dir
):
2558 shutil
.rmtree(path_to_dir
)
2560 if e
.errno
!= errno
.ENOENT
:
2564 os
.makedirs(path_to_dir
)
2566 if e
.errno
!= errno
.EEXIST
:
def RemoveBuildFiles():
  """Removes build files from previous runs.

  Returns:
    True if both output directories were successfully recreated,
    False otherwise.
  """
  if RmTreeAndMkDir(os.path.join('out', 'Release')):
    if RmTreeAndMkDir(os.path.join('build', 'Release')):
      return True
  return False
class BisectOptions(object):
  """Options to be used when running bisection."""

  def __init__(self):
    super(BisectOptions, self).__init__()

    # Defaults for every supported option; ParseCommandLine/FromDict assert
    # against these attribute names, so every settable option must appear here.
    self.target_platform = 'chromium'
    self.build_preference = None
    self.good_revision = None
    self.bad_revision = None
    self.use_goma = None
    self.cros_board = None
    self.cros_remote_ip = None
    self.repeat_test_count = 20
    self.truncate_percent = 25
    self.max_time_minutes = 20
    self.metric = None
    self.command = None
    self.output_buildbot_annotations = None
    self.no_custom_deps = False
    self.working_directory = None
    self.debug_ignore_build = None
    self.debug_ignore_sync = None
    self.debug_ignore_perf_test = None

  def _CreateCommandLineParser(self):
    """Creates a parser with bisect options.

    Returns:
      An instance of optparse.OptionParser.
    """
    usage = ('%prog [options] [-- chromium-options]\n'
             'Perform binary search on revision history to find a minimal '
             'range of revisions where a peformance metric regressed.\n')

    parser = optparse.OptionParser(usage=usage)

    group = optparse.OptionGroup(parser, 'Bisect options')
    group.add_option('-c', '--command',
                     type='str',
                     help='A command to execute your performance test at' +
                     ' each point in the bisection.')
    group.add_option('-b', '--bad_revision',
                     type='str',
                     help='A bad revision to start bisection. ' +
                     'Must be later than good revision. May be either a git' +
                     ' or svn revision.')
    group.add_option('-g', '--good_revision',
                     type='str',
                     help='A revision to start bisection where performance' +
                     ' test is known to pass. Must be earlier than the ' +
                     'bad revision. May be either a git or svn revision.')
    group.add_option('-m', '--metric',
                     type='str',
                     help='The desired metric to bisect on. For example ' +
                     '"vm_rss_final_b/vm_rss_f_b"')
    group.add_option('-r', '--repeat_test_count',
                     type='int',
                     default=20,
                     help='The number of times to repeat the performance '
                     'test. Values will be clamped to range [1, 100]. '
                     'Default value is 20.')
    group.add_option('--max_time_minutes',
                     type='int',
                     default=20,
                     help='The maximum time (in minutes) to take running the '
                     'performance tests. The script will run the performance '
                     'tests according to --repeat_test_count, so long as it '
                     'doesn\'t exceed --max_time_minutes. Values will be '
                     'clamped to range [1, 60].'
                     'Default value is 20.')
    group.add_option('-t', '--truncate_percent',
                     type='int',
                     default=25,
                     help='The highest/lowest % are discarded to form a '
                     'truncated mean. Values will be clamped to range [0, '
                     '25]. Default value is 25 (highest/lowest 25% will be '
                     'discarded).')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Build options')
    group.add_option('-w', '--working_directory',
                     type='str',
                     help='Path to the working directory where the script '
                     'will do an initial checkout of the chromium depot. The '
                     'files will be placed in a subdirectory "bisect" under '
                     'working_directory and that will be used to perform the '
                     'bisection. This parameter is optional, if it is not '
                     'supplied, the script will work from the current depot.')
    group.add_option('--build_preference',
                     type='choice',
                     choices=['msvs', 'ninja', 'make'],
                     help='The preferred build system to use. On linux/mac '
                     'the options are make/ninja. On Windows, the options '
                     'are msvs/ninja.')
    group.add_option('--target_platform',
                     type='choice',
                     choices=['chromium', 'cros', 'android'],
                     default='chromium',
                     help='The target platform. Choices are "chromium" '
                     '(current platform), "cros", or "android". If you '
                     'specify something other than "chromium", you must be '
                     'properly set up to build that platform.')
    group.add_option('--no_custom_deps',
                     dest='no_custom_deps',
                     action="store_true",
                     default=False,
                     help='Run the script with custom_deps or not.')
    group.add_option('--cros_board',
                     type='str',
                     help='The cros board type to build.')
    group.add_option('--cros_remote_ip',
                     type='str',
                     help='The remote machine to image to.')
    group.add_option('--use_goma',
                     action="store_true",
                     help='Add a bunch of extra threads for goma.')
    group.add_option('--output_buildbot_annotations',
                     action="store_true",
                     help='Add extra annotation output for buildbot.')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Debug options')
    group.add_option('--debug_ignore_build',
                     action="store_true",
                     help='DEBUG: Don\'t perform builds.')
    group.add_option('--debug_ignore_sync',
                     action="store_true",
                     help='DEBUG: Don\'t perform syncs.')
    group.add_option('--debug_ignore_perf_test',
                     action="store_true",
                     help='DEBUG: Don\'t perform performance tests.')
    parser.add_option_group(group)

    return parser

  def ParseCommandLine(self):
    """Parses the command line for bisect options.

    Raises:
      RuntimeError: A required parameter is missing or a value is invalid;
          the message includes the parser's help text.
    """
    parser = self._CreateCommandLineParser()
    (opts, args) = parser.parse_args()

    try:
      if not opts.command:
        raise RuntimeError('missing required parameter: --command')

      if not opts.good_revision:
        raise RuntimeError('missing required parameter: --good_revision')

      if not opts.bad_revision:
        raise RuntimeError('missing required parameter: --bad_revision')

      if not opts.metric:
        raise RuntimeError('missing required parameter: --metric')

      if opts.target_platform == 'cros':
        # Run sudo up front to make sure credentials are cached for later.
        print('Sudo is required to build cros:')
        print('')
        RunProcess(['sudo', 'true'])

        if not opts.cros_board:
          raise RuntimeError('missing required parameter: --cros_board')

        if not opts.cros_remote_ip:
          raise RuntimeError('missing required parameter: --cros_remote_ip')

        if not opts.working_directory:
          raise RuntimeError('missing required parameter: --working_directory')

      metric_values = opts.metric.split('/')
      if len(metric_values) != 2:
        raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)

      opts.metric = metric_values
      # Clamp numeric options to their documented ranges.
      opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
      opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
      opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
      opts.truncate_percent = opts.truncate_percent / 100.0

      # Copy every parsed option onto this instance, verifying the names.
      for k, v in opts.__dict__.items():
        assert hasattr(self, k), "Invalid %s attribute in BisectOptions." % k
        setattr(self, k, v)
    except RuntimeError as e:
      output_string = StringIO.StringIO()
      parser.print_help(file=output_string)
      error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
      output_string.close()
      raise RuntimeError(error_message)

  @staticmethod
  def FromDict(values):
    """Creates an instance of BisectOptions with the values parsed from a
    .cfg file.

    Args:
      values: a dict containing options to set.

    Returns:
      An instance of BisectOptions.

    Raises:
      RuntimeError: The metric value is not of the form "chart/trace".
    """
    opts = BisectOptions()

    for k, v in values.items():
      assert hasattr(opts, k), 'Invalid %s attribute in '\
          'BisectOptions.' % k
      setattr(opts, k, v)

    metric_values = opts.metric.split('/')
    if len(metric_values) != 2:
      raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)

    opts.metric = metric_values
    # Clamp numeric options to their documented ranges.
    opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
    opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
    opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
    opts.truncate_percent = opts.truncate_percent / 100.0

    return opts
def main():
  """Entry point: parses options, sets up the checkout, and runs the bisect.

  Returns:
    0 on success, 1 when the bisection failed with an error.
  """
  try:
    opts = BisectOptions()
    parse_results = opts.ParseCommandLine()

    if opts.working_directory:
      custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
      if opts.no_custom_deps:
        custom_deps = None
      bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)

      os.chdir(os.path.join(os.getcwd(), 'src'))

      if not RemoveBuildFiles():
        raise RuntimeError('Something went wrong removing the build files.')

    if not IsPlatformSupported(opts):
      raise RuntimeError("Sorry, this platform isn't supported yet.")

    # Check what source control method they're using. Only support git workflow
    # at the moment.
    source_control = DetermineAndCreateSourceControl(opts)

    if not source_control:
      raise RuntimeError("Sorry, only the git workflow is supported at the "
          "moment.")

    # gClient sync seems to fail if you're not in master branch.
    if not source_control.IsInProperBranch() and not opts.debug_ignore_sync:
      raise RuntimeError("You must switch to master branch to run bisection.")

    bisect_test = BisectPerformanceMetrics(source_control, opts)
    try:
      bisect_results = bisect_test.Run(opts.command,
                                       opts.bad_revision,
                                       opts.good_revision,
                                       opts.metric)
      if bisect_results['error']:
        raise RuntimeError(bisect_results['error'])
      bisect_test.FormatAndPrintResults(bisect_results)
      return 0
    finally:
      # Always restore the checkout, even when the bisect run raised.
      bisect_test.PerformCleanup()
  except RuntimeError as e:
    if opts.output_buildbot_annotations:
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')
    print('Error: %s' % e.message)
    if opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
  return 1
2854 if __name__
== '__main__':