2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
13 further bisect changes to those depots and attempt to narrow down the revision
17 An example usage (using svn cl's):
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
28 An example usage (using git hashes):
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
# The additional repositories that might need to be bisected.
# If the repository has any dependant repositories (such as skia/src needs
# skia/include and skia/gyp to be updated), specify them in the 'depends'
# so that they're synced appropriately.
# Format is:
# src: path to the working directory.
# recurse: True if this repositry will get bisected.
# depends: A list of other repositories that are actually part of the same
#   repository in svn.
# svn: Needed for git workflow to resolve hashes to svn revisions.
# from: Parent depot that must be bisected before this is bisected.
DEPOT_DEPS_NAME = {
  'chromium' : {
    "src" : "src",
    "recurse" : True,
    "depends" : None,
    "from" : ['cros', 'android-chrome'],
    'viewvc': 'http://src.chromium.org/viewvc/chrome?view=revision&revision='
  },
  'webkit' : {
    "src" : "src/third_party/WebKit",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'http://src.chromium.org/viewvc/blink?view=revision&revision='
  },
  'angle' : {
    "src" : "src/third_party/angle",
    "src_old" : "src/third_party/angle_dx11",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
  },
  'v8' : {
    "src" : "src/v8",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    "custom_deps": bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
  },
  'v8_bleeding_edge' : {
    "src" : "src/v8_bleeding_edge",
    "recurse" : True,
    "depends" : None,
    "svn": "https://v8.googlecode.com/svn/branches/bleeding_edge",
    "from" : ['v8'],
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
  },
  'skia/src' : {
    "src" : "src/third_party/skia/src",
    "recurse" : True,
    "svn" : "http://skia.googlecode.com/svn/trunk/src",
    "depends" : ['skia/include', 'skia/gyp'],
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
  },
  'skia/include' : {
    "src" : "src/third_party/skia/include",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/include",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
  },
  'skia/gyp' : {
    "src" : "src/third_party/skia/gyp",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/gyp",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
  },
}
DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
CROS_VERSION_PATTERN = 'new version number from %s'
CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
# SSH keys used to talk to the cros device under test.
# NOTE(review): the key file name below is reconstructed — confirm against the
# cros checkout layout.
CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
                                  'testing_rsa')
CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
                                    'mod_for_test_scripts', 'ssh_keys',
                                    'testing_rsa')

# Possible outcomes of a single build attempt.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2
def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables.

  Args:
    depot_info: A dict of depot-name -> depot-config entries that is merged
        into DEPOT_DEPS_NAME.
  """
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  # update() merges in place; the original rebuilt the dict with
  # dict(a.items() + b.items()), which only works on Python 2 where items()
  # returns concatenable lists.
  DEPOT_DEPS_NAME.update(depot_info)
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
def CalculateTruncatedMean(data_set, truncate_percent):
  """Calculates the truncated mean of a set of values.

  Args:
    data_set: Set of values to use in calculation.
    truncate_percent: The % from the upper/lower portions of the data set to
        discard, expressed as a value in [0, 1].

  Returns:
    The truncated mean as a float.
  """
  if len(data_set) > 2:
    data_set = sorted(data_set)

    discard_num_float = len(data_set) * truncate_percent
    discard_num_int = int(math.floor(discard_num_float))
    kept_weight = len(data_set) - discard_num_float * 2

    data_set = data_set[discard_num_int:len(data_set)-discard_num_int]

    weight_left = 1.0 - (discard_num_float - discard_num_int)

    if weight_left < 1:
      # If the % to discard leaves a fractional portion, need to weight those
      # two boundary values by the remaining fraction.
      unweighted_vals = data_set[1:len(data_set)-1]
      weighted_vals = [data_set[0], data_set[len(data_set)-1]]
      weighted_vals = [w * weight_left for w in weighted_vals]
      data_set = weighted_vals + unweighted_vals
  else:
    kept_weight = len(data_set)

  # sum() with an explicit float() cast replaces the Python-2-only reduce()
  # builtin, and still tolerates string-typed numeric values.
  truncated_mean = sum(float(x) for x in data_set) / kept_weight

  return truncated_mean
def CalculateStandardDeviation(v):
  """Calculates the sample standard deviation of the given list of values.

  Args:
    v: The list of values.

  Returns:
    The sample standard deviation as a float; 0.0 for a single value.
  """
  if len(v) == 1:
    return 0.0

  # CalculateTruncatedMean(v, 0.0) discards nothing, so it is exactly the
  # arithmetic mean; computing it directly removes the cross-dependency and
  # the Python-2-only reduce() call.
  mean = sum(float(x) for x in v) / len(v)
  variances = [float(x) - mean for x in v]
  variances = [x * x for x in variances]
  variance = sum(variances) / (len(v) - 1)
  std_dev = math.sqrt(variance)

  return std_dev
def CalculatePooledStandardError(work_sets):
  """Calculates the pooled standard error across several sets of samples.

  Args:
    work_sets: A list of lists of sample values.

  Returns:
    The pooled standard error as a float; 0.0 if there is no variance data.
  """
  numerator = 0.0
  denominator1 = 0.0
  denominator2 = 0.0

  for current_set in work_sets:
    std_dev = CalculateStandardDeviation(current_set)
    numerator += (len(current_set) - 1) * std_dev ** 2
    denominator1 += len(current_set) - 1
    denominator2 += 1.0 / len(current_set)

  if denominator1:
    return math.sqrt(numerator / denominator1) * math.sqrt(denominator2)
  return 0.0
def CalculateStandardError(v):
  """Calculates the standard error of the mean for a list of values.

  Args:
    v: The list of values.

  Returns:
    The standard error as a float; 0.0 for a single value or empty list.
  """
  if len(v) <= 1:
    return 0.0

  std_dev = CalculateStandardDeviation(v)

  return std_dev / math.sqrt(len(v))
def IsStringFloat(string_to_check):
  """Checks whether or not the given string can be converted to a floating
  point number.

  Args:
    string_to_check: Input string to check if it can be converted to a float.

  Returns:
    True if the string can be converted to a float.
  """
  try:
    float(string_to_check)
    return True
  except ValueError:
    return False
def IsStringInt(string_to_check):
  """Checks whether or not the given string can be converted to a integer.

  Args:
    string_to_check: Input string to check if it can be converted to an int.

  Returns:
    True if the string can be converted to an int.
  """
  try:
    int(string_to_check)
    return True
  except ValueError:
    return False
def IsWindows():
  """Checks whether or not the script is running on Windows.

  Returns:
    True if running on Windows.
  """
  return os.name == 'nt'
def RunProcess(command):
  """Run an arbitrary command. If output from the call is needed, use
  RunProcessAndRetrieveOutput instead.

  Args:
    command: A list containing the command and args to execute.

  Returns:
    The return code of the call.
  """
  # On Windows, use shell=True to get PATH interpretation.
  shell = IsWindows()
  return subprocess.call(command, shell=shell)
def RunProcessAndRetrieveOutput(command, cwd=None):
  """Run an arbitrary command, returning its output and return code. Since
  output is collected via communicate(), there will be no output until the
  call terminates. If you need output while the program runs (ie. so
  that the buildbot doesn't terminate the script), consider RunProcess().

  Args:
    command: A list containing the command and args to execute.

  Returns:
    A tuple of the output and return code.
  """
  # On Windows, use shell=True to get PATH interpretation.
  shell = IsWindows()
  proc = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE,
                          cwd=cwd)

  (output, _) = proc.communicate()

  return (output, proc.returncode)
def RunGit(command, cwd=None):
  """Run a git subcommand, returning its output and return code.

  Args:
    command: A list containing the args to git.

  Returns:
    A tuple of the output and return code.
  """
  command = ['git'] + command

  return RunProcessAndRetrieveOutput(command, cwd=cwd)
def CheckRunGit(command, cwd=None):
  """Run a git subcommand, returning its output and return code. Asserts if
  the return code of the call is non-zero.

  Args:
    command: A list containing the args to git.

  Returns:
    A tuple of the output and return code.
  """
  (output, return_code) = RunGit(command, cwd=cwd)

  assert not return_code, 'An error occurred while running'\
                          ' "git %s"' % ' '.join(command)
  return output
def SetBuildSystemDefault(build_system):
  """Sets up any environment variables needed to build with the specified build
  system.

  Args:
    build_system: A string specifying build system. Currently only 'ninja' or
        'make' are supported."""
  if build_system == 'ninja':
    gyp_var = os.getenv('GYP_GENERATORS')

    if not gyp_var or not 'ninja' in gyp_var:
      if gyp_var:
        os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
      else:
        os.environ['GYP_GENERATORS'] = 'ninja'

      if IsWindows():
        os.environ['GYP_DEFINES'] = 'component=shared_library '\
            'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
            'chromium_win_pch=0'
  elif build_system == 'make':
    os.environ['GYP_GENERATORS'] = 'make'
  else:
    raise RuntimeError('%s build not supported.' % build_system)
def BuildWithMake(threads, targets):
  """Runs a make Release build of |targets|, optionally parallelized.

  Returns:
    True if the build succeeded (make returned 0).
  """
  cmd = ['make', 'BUILDTYPE=Release']

  if threads:
    cmd.append('-j%d' % threads)

  cmd += targets

  return_code = RunProcess(cmd)

  return not return_code
def BuildWithNinja(threads, targets):
  """Runs a ninja Release build of |targets|, optionally parallelized.

  Returns:
    True if the build succeeded (ninja returned 0).
  """
  cmd = ['ninja', '-C', os.path.join('out', 'Release')]

  if threads:
    cmd.append('-j%d' % threads)

  cmd += targets

  return_code = RunProcess(cmd)

  return not return_code
def BuildWithVisualStudio(targets):
  """Builds |targets| with devenv.com in Release configuration.

  Returns:
    True if the build succeeded (devenv returned 0).
  """
  path_to_devenv = os.path.abspath(
      os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
  path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
  cmd = [path_to_devenv, '/build', 'Release', path_to_sln]

  for t in targets:
    cmd.extend(['/Project', t])

  return_code = RunProcess(cmd)

  return not return_code
class Builder(object):
  """Builder is used by the bisect script to build relevant targets and deploy.
  """
  def __init__(self, opts):
    """Performs setup for building with target build system.

    Args:
      opts: Options parsed from command line.
    """
    if IsWindows():
      if not opts.build_preference:
        opts.build_preference = 'msvs'

      if opts.build_preference == 'msvs':
        if not os.getenv('VS100COMNTOOLS'):
          raise RuntimeError(
              'Path to visual studio could not be determined.')
      else:
        SetBuildSystemDefault(opts.build_preference)
    else:
      if not opts.build_preference:
        if 'ninja' in os.getenv('GYP_GENERATORS'):
          opts.build_preference = 'ninja'
        else:
          opts.build_preference = 'make'

      SetBuildSystemDefault(opts.build_preference)

    if not bisect_utils.SetupPlatformBuildEnvironment(opts):
      raise RuntimeError('Failed to set platform environment.')

    bisect_utils.RunGClient(['runhooks'])

  @staticmethod
  def FromOpts(opts):
    """Constructs the builder subclass matching opts.target_platform."""
    builder = None
    if opts.target_platform == 'cros':
      builder = CrosBuilder(opts)
    elif opts.target_platform == 'android':
      builder = AndroidBuilder(opts)
    elif opts.target_platform == 'android-chrome':
      builder = AndroidChromeBuilder(opts)
    else:
      builder = DesktopBuilder(opts)
    return builder

  def Build(self, depot, opts):
    raise NotImplementedError()
class DesktopBuilder(Builder):
  """DesktopBuilder is used to build Chromium on linux/mac/windows."""
  def __init__(self, opts):
    super(DesktopBuilder, self).__init__(opts)

  def Build(self, depot, opts):
    """Builds chromium_builder_perf target using options passed into
    the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    targets = ['chromium_builder_perf']

    # NOTE(review): thread count reconstructed — confirm goma default.
    threads = 16
    if opts.use_goma:
      threads = 64

    build_success = False
    if opts.build_preference == 'make':
      build_success = BuildWithMake(threads, targets)
    elif opts.build_preference == 'ninja':
      build_success = BuildWithNinja(threads, targets)
    elif opts.build_preference == 'msvs':
      assert IsWindows(), 'msvs is only supported on Windows.'
      build_success = BuildWithVisualStudio(targets)
    else:
      assert False, 'No build system defined.'
    return build_success
class AndroidBuilder(Builder):
  """AndroidBuilder is used to build on android."""
  def __init__(self, opts):
    super(AndroidBuilder, self).__init__(opts)

  def _GetTargets(self):
    return ['chromium_testshell', 'cc_perftests_apk', 'android_tools']

  def Build(self, depot, opts):
    """Builds the android content shell and other necessary tools using options
    passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    threads = 16
    if opts.use_goma:
      threads = 64

    build_success = False
    if opts.build_preference == 'ninja':
      build_success = BuildWithNinja(threads, self._GetTargets())
    else:
      assert False, 'No build system defined.'
    return build_success
class AndroidChromeBuilder(AndroidBuilder):
  """AndroidBuilder is used to build on android's chrome."""
  def __init__(self, opts):
    super(AndroidChromeBuilder, self).__init__(opts)

  def _GetTargets(self):
    # Adds chrome_apk on top of the stock android targets.
    return AndroidBuilder._GetTargets(self) + ['chrome_apk']
class CrosBuilder(Builder):
  """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
  target platform."""
  def __init__(self, opts):
    super(CrosBuilder, self).__init__(opts)

  def ImageToTarget(self, opts):
    """Installs latest image to target specified by opts.cros_remote_ip.

    Args:
      opts: Program options containing cros_board and cros_remote_ip.

    Returns:
      True if successful.
    """
    try:
      # Keys will most likely be set to 0640 after wiping the chroot.
      os.chmod(CROS_SCRIPT_KEY_PATH, 0o600)
      os.chmod(CROS_TEST_KEY_PATH, 0o600)
      cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
             '--remote=%s' % opts.cros_remote_ip,
             '--board=%s' % opts.cros_board, '--test', '--verbose']

      return_code = RunProcess(cmd)
      return not return_code
    except OSError:
      return False

  def BuildPackages(self, opts, depot):
    """Builds packages for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    if depot != 'cros':
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=Release', './build_packages',
            '--board=%s' % opts.cros_board]
    return_code = RunProcess(cmd)

    return not return_code

  def BuildImage(self, opts, depot):
    """Builds test image for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    if depot != 'cros':
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=Release', '--', './build_image',
            '--board=%s' % opts.cros_board, 'test']
    return_code = RunProcess(cmd)

    return not return_code

  def Build(self, depot, opts):
    """Builds targets using options passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    if self.BuildPackages(opts, depot):
      if self.BuildImage(opts, depot):
        return self.ImageToTarget(opts)
    return False
class SourceControl(object):
  """SourceControl is an abstraction over the underlying source control
  system used for chromium. For now only git is supported, but in the
  future, the svn workflow could be added as well."""
  def __init__(self):
    super(SourceControl, self).__init__()

  def SyncToRevisionWithGClient(self, revision):
    """Uses gclient to sync to the specified revision.

    ie. gclient sync --revision <revision>

    Args:
      revision: The git SHA1 or svn CL (depending on workflow).

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunGClient(['sync', '--revision',
        revision, '--verbose', '--nohooks', '--reset', '--force'])

  def SyncToRevisionWithRepo(self, timestamp):
    """Uses repo to sync all the underlying git depots to the specified
    time.

    Args:
      timestamp: The unix timestamp to sync to.

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunRepoSyncAtTimestamp(timestamp)
class GitSourceControl(SourceControl):
  """GitSourceControl is used to query the underlying source control. """
  def __init__(self, opts):
    super(GitSourceControl, self).__init__()
    self.opts = opts

  def IsGit(self):
    return True

  def GetRevisionList(self, revision_range_end, revision_range_start,
                      cwd=None):
    """Retrieves a list of revisions between |revision_range_start| and
    |revision_range_end|.

    Args:
      revision_range_end: The SHA1 for the end of the range.
      revision_range_start: The SHA1 for the beginning of the range.

    Returns:
      A list of the revisions between |revision_range_start| and
      |revision_range_end| (inclusive).
    """
    revision_range = '%s..%s' % (revision_range_start, revision_range_end)
    cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
    log_output = CheckRunGit(cmd, cwd=cwd)

    revision_hash_list = log_output.split()
    revision_hash_list.append(revision_range_start)

    return revision_hash_list

  def SyncToRevision(self, revision, sync_client=None):
    """Syncs to the specified revision.

    Args:
      revision: The revision to sync to.
      use_gclient: Specifies whether or not we should sync using gclient or
        just use source control directly.

    Returns:
      True if successful.
    """
    if not sync_client:
      results = RunGit(['checkout', revision])[1]
    elif sync_client == 'gclient':
      results = self.SyncToRevisionWithGClient(revision)
    elif sync_client == 'repo':
      results = self.SyncToRevisionWithRepo(revision)

    return not results

  def ResolveToRevision(self, revision_to_check, depot, search, cwd=None):
    """If an SVN revision is supplied, try to resolve it to a git SHA1.

    Args:
      revision_to_check: The user supplied revision string that may need to be
        resolved to a git SHA1.
      depot: The depot the revision_to_check is from.
      search: The number of changelists to try if the first fails to resolve
        to a git hash. If the value is negative, the function will search
        backwards chronologically, otherwise it will search forward.

    Returns:
      A string containing a git SHA1 hash, otherwise None.
    """
    # Android-chrome is git only, so no need to resolve this to anything else.
    if depot == 'android-chrome':
      return revision_to_check

    if depot != 'cros':
      if not IsStringInt(revision_to_check):
        return revision_to_check

      depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'

      if depot != 'chromium':
        depot_svn = DEPOT_DEPS_NAME[depot]['svn']

      svn_revision = int(revision_to_check)
      git_revision = None

      if search > 0:
        search_range = xrange(svn_revision, svn_revision + search, 1)
      else:
        search_range = xrange(svn_revision, svn_revision + search, -1)

      for i in search_range:
        svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
        cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
               'origin/master']

        (log_output, return_code) = RunGit(cmd, cwd=cwd)

        assert not return_code, 'An error occurred while running'\
                                ' "git %s"' % ' '.join(cmd)

        if not return_code:
          log_output = log_output.strip()

          if log_output:
            git_revision = log_output
            break

      return git_revision
    else:
      if IsStringInt(revision_to_check):
        return int(revision_to_check)
      else:
        cwd = os.getcwd()
        os.chdir(os.path.join(os.getcwd(), 'src', 'third_party',
            'chromiumos-overlay'))
        pattern = CROS_VERSION_PATTERN % revision_to_check
        cmd = ['log', '--format=%ct', '-1', '--grep', pattern]

        git_revision = None

        log_output = CheckRunGit(cmd, cwd=cwd)
        if log_output:
          git_revision = log_output
          git_revision = int(log_output.strip())
        os.chdir(cwd)

        return git_revision

  def IsInProperBranch(self):
    """Confirms they're in the master branch for performing the bisection.
    This is needed or gclient will fail to sync properly.

    Returns:
      True if the current branch on src is 'master'
    """
    cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
    log_output = CheckRunGit(cmd)
    log_output = log_output.strip()

    return log_output == "master"

  def SVNFindRev(self, revision):
    """Maps directly to the 'git svn find-rev' command.

    Args:
      revision: The git SHA1 to use.

    Returns:
      An integer changelist #, otherwise None.
    """
    cmd = ['svn', 'find-rev', revision]

    output = CheckRunGit(cmd)
    svn_revision = output.strip()

    if IsStringInt(svn_revision):
      return int(svn_revision)

    return None

  def QueryRevisionInfo(self, revision, cwd=None):
    """Gathers information on a particular revision, such as author's name,
    email, subject, and date.

    Args:
      revision: Revision you want to gather information on.
    Returns:
      A dict in the following format:
      {
        'author': %s,
        'email': %s,
        'date': %s,
        'subject': %s,
        'body': %s,
      }
    """
    commit_info = {}

    formats = ['%cN', '%cE', '%s', '%cD', '%b']
    targets = ['author', 'email', 'subject', 'date', 'body']

    for i in xrange(len(formats)):
      cmd = ['log', '--format=%s' % formats[i], '-1', revision]
      output = CheckRunGit(cmd, cwd=cwd)
      commit_info[targets[i]] = output.rstrip()

    return commit_info

  def CheckoutFileAtRevision(self, file_name, revision, cwd=None):
    """Performs a checkout on a file at the given revision.

    Returns:
      True if successful.
    """
    return not RunGit(['checkout', revision, file_name], cwd=cwd)[1]

  def RevertFileToHead(self, file_name):
    """Unstages a file and returns it to HEAD.

    Returns:
      True if successful.
    """
    # NOTE(review): |file_name| is currently unused; the body operates on
    # bisect_utils.FILE_DEPS_GIT only — confirm against callers.
    # Reset doesn't seem to return 0 on success.
    RunGit(['reset', 'HEAD', bisect_utils.FILE_DEPS_GIT])

    return not RunGit(['checkout', bisect_utils.FILE_DEPS_GIT])[1]

  def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
    """Returns a list of commits that modified this file.

    Args:
      filename: Name of file.
      revision_start: Start of revision range.
      revision_end: End of revision range.

    Returns:
      Returns a list of commits that touched this file.
    """
    cmd = ['log', '--format=%H', '%s~1..%s' % (revision_start, revision_end),
           filename]
    output = CheckRunGit(cmd)

    return [o for o in output.split('\n') if o]
class BisectPerformanceMetrics(object):
  """BisectPerformanceMetrics performs a bisection against a list of range
  of revisions to narrow down where performance regressions may have
  occurred."""

  def __init__(self, source_control, opts):
    super(BisectPerformanceMetrics, self).__init__()

    self.opts = opts
    self.source_control = source_control
    self.src_cwd = os.getcwd()
    self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
    self.depot_cwd = {}
    self.cleanup_commands = []
    self.warnings = []
    self.builder = Builder.FromOpts(opts)

    # This always starts true since the script grabs latest first.
    self.was_blink = True

    for d in DEPOT_NAMES:
      # The working directory of each depot is just the path to the depot, but
      # since we're already in 'src', we can skip that part.

      self.depot_cwd[d] = os.path.join(
          self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
917 def PerformCleanup(self
):
918 """Performs cleanup when script is finished."""
919 os
.chdir(self
.src_cwd
)
920 for c
in self
.cleanup_commands
:
922 shutil
.move(c
[1], c
[2])
924 assert False, 'Invalid cleanup command.'
926 def GetRevisionList(self
, depot
, bad_revision
, good_revision
):
927 """Retrieves a list of all the commits between the bad revision and
928 last known good revision."""
930 revision_work_list
= []
933 revision_range_start
= good_revision
934 revision_range_end
= bad_revision
937 self
.ChangeToDepotWorkingDirectory('cros')
939 # Print the commit timestamps for every commit in the revision time
940 # range. We'll sort them and bisect by that. There is a remote chance that
941 # 2 (or more) commits will share the exact same timestamp, but it's
942 # probably safe to ignore that case.
943 cmd
= ['repo', 'forall', '-c',
944 'git log --format=%%ct --before=%d --after=%d' % (
945 revision_range_end
, revision_range_start
)]
946 (output
, return_code
) = RunProcessAndRetrieveOutput(cmd
)
948 assert not return_code
, 'An error occurred while running'\
949 ' "%s"' % ' '.join(cmd
)
953 revision_work_list
= list(set(
954 [int(o
) for o
in output
.split('\n') if IsStringInt(o
)]))
955 revision_work_list
= sorted(revision_work_list
, reverse
=True)
957 cwd
= self
._GetDepotDirectory
(depot
)
958 revision_work_list
= self
.source_control
.GetRevisionList(bad_revision
,
959 good_revision
, cwd
=cwd
)
961 return revision_work_list
963 def _GetV8BleedingEdgeFromV8TrunkIfMappable(self
, revision
):
964 svn_revision
= self
.source_control
.SVNFindRev(revision
)
966 if IsStringInt(svn_revision
):
967 # V8 is tricky to bisect, in that there are only a few instances when
968 # we can dive into bleeding_edge and get back a meaningful result.
969 # Try to detect a V8 "business as usual" case, which is when:
970 # 1. trunk revision N has description "Version X.Y.Z"
971 # 2. bleeding_edge revision (N-1) has description "Prepare push to
972 # trunk. Now working on X.Y.(Z+1)."
973 v8_dir
= self
._GetDepotDirectory
('v8')
974 v8_bleeding_edge_dir
= self
._GetDepotDirectory
('v8_bleeding_edge')
976 revision_info
= self
.source_control
.QueryRevisionInfo(revision
,
979 version_re
= re
.compile("Version (?P<values>[0-9,.]+)")
981 regex_results
= version_re
.search(revision_info
['subject'])
984 version
= regex_results
.group('values')
986 git_revision
= self
.source_control
.ResolveToRevision(
987 int(svn_revision
) - 1, 'v8_bleeding_edge', -1,
988 cwd
=v8_bleeding_edge_dir
)
991 revision_info
= self
.source_control
.QueryRevisionInfo(git_revision
,
992 cwd
=v8_bleeding_edge_dir
)
994 if 'Prepare push to trunk' in revision_info
['subject']:
998 def _GetNearestV8BleedingEdgeFromTrunk(self
, revision
, search_forward
=True):
999 cwd
= self
._GetDepotDirectory
('v8')
1000 cmd
= ['log', '--format=%ct', '-1', revision
]
1001 output
= CheckRunGit(cmd
, cwd
=cwd
)
1002 commit_time
= int(output
)
1006 cmd
= ['log', '--format=%H', '-10', '--after=%d' % commit_time
,
1008 output
= CheckRunGit(cmd
, cwd
=cwd
)
1009 output
= output
.split()
1011 commits
= reversed(commits
)
1013 cmd
= ['log', '--format=%H', '-10', '--before=%d' % commit_time
,
1015 output
= CheckRunGit(cmd
, cwd
=cwd
)
1016 output
= output
.split()
1019 bleeding_edge_revision
= None
1022 bleeding_edge_revision
= self
._GetV
8BleedingEdgeFromV
8TrunkIfMappable
(c
)
1023 if bleeding_edge_revision
:
1026 return bleeding_edge_revision
1028 def Get3rdPartyRevisionsFromCurrentRevision(self
, depot
, revision
):
1029 """Parses the DEPS file to determine WebKit/v8/etc... versions.
1032 A dict in the format {depot:revision} if successful, otherwise None.
1036 self
.ChangeToDepotWorkingDirectory(depot
)
1040 if depot
== 'chromium' or depot
== 'android-chrome':
1041 locals = {'Var': lambda _
: locals["vars"][_
],
1042 'From': lambda *args
: None}
1043 execfile(bisect_utils
.FILE_DEPS_GIT
, {}, locals)
1047 rxp
= re
.compile(".git@(?P<revision>[a-fA-F0-9]+)")
1049 for d
in DEPOT_NAMES
:
1050 if DEPOT_DEPS_NAME
[d
].has_key('platform'):
1051 if DEPOT_DEPS_NAME
[d
]['platform'] != os
.name
:
1054 if (DEPOT_DEPS_NAME
[d
]['recurse'] and
1055 depot
in DEPOT_DEPS_NAME
[d
]['from']):
1056 if (locals['deps'].has_key(DEPOT_DEPS_NAME
[d
]['src']) or
1057 locals['deps'].has_key(DEPOT_DEPS_NAME
[d
]['src_old'])):
1058 if locals['deps'].has_key(DEPOT_DEPS_NAME
[d
]['src']):
1059 re_results
= rxp
.search(locals['deps'][DEPOT_DEPS_NAME
[d
]['src']])
1060 self
.depot_cwd
[d
] =\
1061 os
.path
.join(self
.src_cwd
, DEPOT_DEPS_NAME
[d
]['src'][4:])
1062 elif locals['deps'].has_key(DEPOT_DEPS_NAME
[d
]['src_old']):
1064 rxp
.search(locals['deps'][DEPOT_DEPS_NAME
[d
]['src_old']])
1065 self
.depot_cwd
[d
] =\
1066 os
.path
.join(self
.src_cwd
, DEPOT_DEPS_NAME
[d
]['src_old'][4:])
1069 results
[d
] = re_results
.group('revision')
1071 print 'Couldn\'t parse revision for %s.' % d
1075 print 'Couldn\'t find %s while parsing .DEPS.git.' % d
1078 elif depot
== 'cros':
1079 cmd
= [CROS_SDK_PATH
, '--', 'portageq-%s' % self
.opts
.cros_board
,
1080 'best_visible', '/build/%s' % self
.opts
.cros_board
, 'ebuild',
1081 CROS_CHROMEOS_PATTERN
]
1082 (output
, return_code
) = RunProcessAndRetrieveOutput(cmd
)
1084 assert not return_code
, 'An error occurred while running'\
1085 ' "%s"' % ' '.join(cmd
)
1087 if len(output
) > CROS_CHROMEOS_PATTERN
:
1088 output
= output
[len(CROS_CHROMEOS_PATTERN
):]
1091 output
= output
.split('_')[0]
1094 contents
= output
.split('.')
1096 version
= contents
[2]
1098 if contents
[3] != '0':
1099 warningText
= 'Chrome version: %s.%s but using %s.0 to bisect.' %\
1100 (version
, contents
[3], version
)
1101 if not warningText
in self
.warnings
:
1102 self
.warnings
.append(warningText
)
1105 self
.ChangeToDepotWorkingDirectory('chromium')
1106 return_code
= CheckRunGit(['log', '-1', '--format=%H',
1107 '--author=chrome-release@google.com', '--grep=to %s' % version
,
1111 results
['chromium'] = output
.strip()
1113 # We can't try to map the trunk revision to bleeding edge yet, because
1114 # we don't know which direction to try to search in. Have to wait until
1115 # the bisect has narrowed the results down to 2 v8 rolls.
1116 results
['v8_bleeding_edge'] = None
1120 def BuildCurrentRevision(self
, depot
):
1121 """Builds chrome and performance_ui_tests on the current revision.
1124 True if the build was successful.
1126 if self
.opts
.debug_ignore_build
:
1130 os
.chdir(self
.src_cwd
)
1132 build_success
= self
.builder
.Build(depot
, self
.opts
)
1136 return build_success
1138 def RunGClientHooks(self
):
1139 """Runs gclient with runhooks command.
1142 True if gclient reports no errors.
1145 if self
.opts
.debug_ignore_build
:
1148 return not bisect_utils
.RunGClient(['runhooks'], cwd
=self
.src_cwd
)
1150 def TryParseHistogramValuesFromOutput(self
, metric
, text
):
1151 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>.
1154 metric: The metric as a list of [<trace>, <value>] strings.
1155 text: The text to parse the metric values from.
1158 A list of floating point numbers found.
1160 metric_formatted
= 'HISTOGRAM %s: %s= ' % (metric
[0], metric
[1])
1162 text_lines
= text
.split('\n')
1165 for current_line
in text_lines
:
1166 if metric_formatted
in current_line
:
1167 current_line
= current_line
[len(metric_formatted
):]
1170 histogram_values
= eval(current_line
)
1172 for b
in histogram_values
['buckets']:
1173 average_for_bucket
= float(b
['high'] + b
['low']) * 0.5
1174 # Extends the list with N-elements with the average for that bucket.
1175 values_list
.extend([average_for_bucket
] * b
['count'])
1181 def TryParseResultValuesFromOutput(self
, metric
, text
):
1182 """Attempts to parse a metric in the format RESULT <graph: <trace>.
1185 metric: The metric as a list of [<trace>, <value>] strings.
1186 text: The text to parse the metric values from.
1189 A list of floating point numbers found.
1191 # Format is: RESULT <graph>: <trace>= <value> <units>
1192 metric_formatted
= re
.escape('RESULT %s: %s=' % (metric
[0], metric
[1]))
1194 text_lines
= text
.split('\n')
1197 for current_line
in text_lines
:
1198 # Parse the output from the performance test for the metric we're
1200 metric_re
= metric_formatted
+\
1201 "(\s)*(?P<values>[0-9]+(\.[0-9]*)?)"
1202 metric_re
= re
.compile(metric_re
)
1203 regex_results
= metric_re
.search(current_line
)
1205 if not regex_results
is None:
1206 values_list
+= [regex_results
.group('values')]
1208 metric_re
= metric_formatted
+\
1209 "(\s)*\[(\s)*(?P<values>[0-9,.]+)\]"
1210 metric_re
= re
.compile(metric_re
)
1211 regex_results
= metric_re
.search(current_line
)
1213 if not regex_results
is None:
1214 metric_values
= regex_results
.group('values')
1216 values_list
+= metric_values
.split(',')
1218 values_list
= [float(v
) for v
in values_list
if IsStringFloat(v
)]
1220 # If the metric is times/t, we need to sum the timings in order to get
1221 # similar regression results as the try-bots.
1222 metrics_to_sum
= [['times', 't'], ['times', 'page_load_time'],
1223 ['cold_times', 'page_load_time'], ['warm_times', 'page_load_time']]
1225 if metric
in metrics_to_sum
:
1227 values_list
= [reduce(lambda x
, y
: float(x
) + float(y
), values_list
)]
def ParseMetricValuesFromOutput(self, metric, text):
  """Parses output from performance_ui_tests and retrieves the results for
  a given metric.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  # Prefer RESULT-line values; fall back to histogram output only when no
  # RESULT values were found (empty list is falsy).
  return (self.TryParseResultValuesFromOutput(metric, text) or
          self.TryParseHistogramValuesFromOutput(metric, text))
1249 def _GenerateProfileIfNecessary(self
, command_args
):
1250 """Checks the command line of the performance test for dependencies on
1251 profile generation, and runs tools/perf/generate_profile as necessary.
1254 command_args: Command line being passed to performance test, as a list.
1257 False if profile generation was necessary and failed, otherwise True.
1260 if '--profile-dir' in ' '.join(command_args
):
1261 # If we were using python 2.7+, we could just use the argparse
1262 # module's parse_known_args to grab --profile-dir. Since some of the
1263 # bots still run 2.6, have to grab the arguments manually.
1265 args_to_parse
= ['--profile-dir', '--browser']
1267 for arg_to_parse
in args_to_parse
:
1268 for i
, current_arg
in enumerate(command_args
):
1269 if arg_to_parse
in current_arg
:
1270 current_arg_split
= current_arg
.split('=')
1272 # Check 2 cases, --arg=<val> and --arg <val>
1273 if len(current_arg_split
) == 2:
1274 arg_dict
[arg_to_parse
] = current_arg_split
[1]
1275 elif i
+ 1 < len(command_args
):
1276 arg_dict
[arg_to_parse
] = command_args
[i
+1]
1278 path_to_generate
= os
.path
.join('tools', 'perf', 'generate_profile')
1280 if arg_dict
.has_key('--profile-dir') and arg_dict
.has_key('--browser'):
1281 profile_path
, profile_type
= os
.path
.split(arg_dict
['--profile-dir'])
1282 return not RunProcess(['python', path_to_generate
,
1283 '--profile-type-to-generate', profile_type
,
1284 '--browser', arg_dict
['--browser'], '--output-dir', profile_path
])
def RunPerformanceTestAndParseResults(self, command_to_run, metric,
    reset_on_first_run=False, upload_on_last_run=False, results_label=None):
  """Runs a performance test on the current revision by executing the
  'command_to_run' and parses the results.

  Args:
    command_to_run: The command to be run to execute the performance test.
    metric: The metric to parse out from the results of the performance test.
    reset_on_first_run: If True, pass --reset-results on the first run.
    upload_on_last_run: If True, pass --upload-results on the last run.
    results_label: Label passed along with uploaded results.

  Returns:
    On success, it will return a tuple of the average value of the metric,
    a success code of 0, and the combined output of all runs; on failure a
    tuple of (error message, -1[, output]).
  """
  if self.opts.debug_ignore_perf_test:
    return ({'mean': 0.0, 'std_err': 0.0, 'std_dev': 0.0, 'values': [0.0]}, 0)

  if os.name == 'nt':
    # NOTE(review): the platform guard around this path rewrite was elided
    # in this chunk; rewriting separators is only meaningful on Windows —
    # confirm against upstream.
    command_to_run = command_to_run.replace('/', r'\\')

  args = shlex.split(command_to_run)

  if not self._GenerateProfileIfNecessary(args):
    return ('Failed to generate profile for performance test.', -1)

  # If running a telemetry test for cros, insert the remote ip, and
  # identity parameters.
  is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
  if self.opts.target_platform == 'cros' and is_telemetry:
    args.append('--remote=%s' % self.opts.cros_remote_ip)
    args.append('--identity=%s' % CROS_TEST_KEY_PATH)

  os.chdir(self.src_cwd)

  start_time = time.time()

  metric_values = []
  output_of_all_runs = ''
  for i in range(self.opts.repeat_test_count):
    # Can ignore the return code since if the tests fail, it won't return 0.
    current_args = copy.copy(args)
    if is_telemetry:
      # NOTE(review): the telemetry-only guard around these flags was
      # elided in this chunk; --reset-results/--upload-results are
      # telemetry flags — confirm against upstream.
      if i == 0 and reset_on_first_run:
        current_args.append('--reset-results')
      elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
        current_args.append('--upload-results')
      if results_label:
        current_args.append('--results-label=%s' % results_label)
    try:
      (output, return_code) = RunProcessAndRetrieveOutput(current_args)
    except OSError as e:
      if e.errno == errno.ENOENT:
        err_text = ("Something went wrong running the performance test. "
            "Please review the command line:\n\n")
        if 'src/' in ' '.join(args):
          err_text += ("Check that you haven't accidentally specified a path "
              "with src/ in the command.\n\n")
        err_text += ' '.join(args)

        return (err_text, -1)
      raise

    output_of_all_runs += output
    if self.opts.output_buildbot_annotations:
      print(output)

    metric_values += self.ParseMetricValuesFromOutput(metric, output)

    elapsed_minutes = (time.time() - start_time) / 60.0

    # Stop once we are over the time budget or the metric clearly isn't in
    # the output at all.
    if elapsed_minutes >= self.opts.max_time_minutes or not metric_values:
      break

  if metric_values:
    # Need to get the average value if there were multiple values.
    truncated_mean = CalculateTruncatedMean(metric_values,
        self.opts.truncate_percent)
    standard_err = CalculateStandardError(metric_values)
    standard_dev = CalculateStandardDeviation(metric_values)

    values = {
        'mean': truncated_mean,
        'std_err': standard_err,
        'std_dev': standard_dev,
        'values': metric_values,
    }

    print('Results of performance test: %12f %12f' % (
        truncated_mean, standard_err))
    print('')
    return (values, 0, output_of_all_runs)

  return ('Invalid metric specified, or no values returned from '
      'performance test.', -1, output_of_all_runs)
def FindAllRevisionsToSync(self, revision, depot):
  """Finds all dependant revisions and depots that need to be synced for a
  given revision. This is only useful in the git workflow, as an svn depot
  may be split into multiple mirrors.

  ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
  skia/include. To sync skia/src properly, one has to find the proper
  revisions in skia/gyp and skia/include.

  Args:
    revision: The revision to sync to.
    depot: The depot in use at the moment (probably skia).

  Returns:
    A list of [depot, revision] pairs that need to be synced, or None if a
    dependant revision could not be resolved.
  """
  revisions_to_sync = [[depot, revision]]

  is_base = ((depot == 'chromium') or (depot == 'cros') or
      (depot == 'android-chrome'))

  # Some SVN depots were split into multiple git depots, so we need to
  # figure out for each mirror which git revision to grab. There's no
  # guarantee that the SVN revision will exist for each of the dependant
  # depots, so we have to grep the git logs and grab the next earlier one.
  # NOTE(review): the `not is_base` part of this guard was elided in this
  # chunk — confirm against upstream.
  if not is_base and\
     DEPOT_DEPS_NAME[depot]['depends'] and\
     self.source_control.IsGit():
    svn_rev = self.source_control.SVNFindRev(revision)

    for d in DEPOT_DEPS_NAME[depot]['depends']:
      self.ChangeToDepotWorkingDirectory(d)

      dependant_rev = self.source_control.ResolveToRevision(svn_rev, d, -1000)

      if dependant_rev:
        revisions_to_sync.append([d, dependant_rev])

    num_resolved = len(revisions_to_sync)
    num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])

    self.ChangeToDepotWorkingDirectory(depot)

    # The first entry is the depot itself, so every dependant must have
    # resolved for the sync to be complete.
    if num_resolved - 1 != num_needed:
      return None

  return revisions_to_sync
def PerformPreBuildCleanup(self):
  """Performs necessary cleanup between runs."""
  print('Cleaning up between runs.')
  print('')

  # Having these pyc files around between runs can confuse the
  # perf tests and cause them to crash.
  # Note: renamed the middle tuple element; the original bound it to `dir`,
  # shadowing the builtin.
  for (path, _, files) in os.walk(self.src_cwd):
    for cur_file in files:
      if cur_file.endswith('.pyc'):
        path_to_file = os.path.join(path, cur_file)
        os.remove(path_to_file)
def PerformWebkitDirectoryCleanup(self, revision):
  """If the script is switching between Blink and WebKit during bisect,
  its faster to just delete the directory rather than leave it up to git
  to sync.

  Args:
    revision: The revision being checked out.

  Returns:
    True if successful.
  """
  if not self.source_control.CheckoutFileAtRevision(
      bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
    return False

  # NOTE(review): the cwd save/restore around the DEPS probe was elided in
  # this chunk — confirm against upstream.
  cwd = os.getcwd()
  os.chdir(self.src_cwd)

  is_blink = bisect_utils.IsDepsFileBlink()

  os.chdir(cwd)

  if not self.source_control.RevertFileToHead(
      bisect_utils.FILE_DEPS_GIT):
    return False

  if self.was_blink != is_blink:
    self.was_blink = is_blink
    return bisect_utils.RemoveThirdPartyWebkitDirectory()
  return True
def PerformCrosChrootCleanup(self):
  """Deletes the chroot.

  Returns:
    True if successful.
  """
  # NOTE(review): cwd save/restore reconstructed around the elided lines —
  # confirm against upstream.
  cwd = os.getcwd()
  self.ChangeToDepotWorkingDirectory('cros')
  cmd = [CROS_SDK_PATH, '--delete']
  return_code = RunProcess(cmd)
  os.chdir(cwd)
  return not return_code
def CreateCrosChroot(self):
  """Creates a new chroot.

  Returns:
    True if successful.
  """
  # NOTE(review): cwd save/restore reconstructed around the elided lines —
  # confirm against upstream.
  cwd = os.getcwd()
  self.ChangeToDepotWorkingDirectory('cros')
  cmd = [CROS_SDK_PATH, '--create']
  return_code = RunProcess(cmd)
  os.chdir(cwd)
  return not return_code
def PerformPreSyncCleanup(self, revision, depot):
  """Performs any necessary cleanup before syncing.

  Args:
    revision: The revision being synced to.
    depot: The depot being synced.

  Returns:
    True if successful.
  """
  if depot == 'chromium':
    if not bisect_utils.RemoveThirdPartyLibjingleDirectory():
      return False
    return self.PerformWebkitDirectoryCleanup(revision)
  elif depot == 'cros':
    return self.PerformCrosChrootCleanup()
  return True
def RunPostSync(self, depot):
  """Performs any work after syncing.

  Args:
    depot: The depot that was just synced.

  Returns:
    True if successful.
  """
  if self.opts.target_platform == 'android':
    if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
        path_to_src=self.src_cwd):
      return False

  # NOTE(review): the cros guard before CreateCrosChroot was elided in this
  # chunk — confirm against upstream.
  if depot == 'cros':
    return self.CreateCrosChroot()
  else:
    return self.RunGClientHooks()
def ShouldSkipRevision(self, depot, revision):
  """Some commits can be safely skipped (such as a DEPS roll), since the tool
  is git based those changes would have no effect.

  Args:
    depot: The depot being bisected.
    revision: Current revision we're synced to.

  Returns:
    True if we should skip building/testing this revision.
  """
  if depot == 'chromium':
    if self.source_control.IsGit():
      cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
      output = CheckRunGit(cmd)

      files = output.splitlines()

      # A commit whose only change is the DEPS file has no effect here.
      if len(files) == 1 and files[0] == 'DEPS':
        return True

  return False
def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
    skippable=False):
  """Performs a full sync/build/run of the specified revision.

  Args:
    revision: The revision to sync to.
    depot: The depot that's being used at the moment (src, webkit, etc.)
    command_to_run: The command to execute the performance test.
    metric: The performance metric being tested.
    skippable: If True, DEPS-only revisions may be skipped.

  Returns:
    On success, a tuple containing the results of the performance test.
    Otherwise, a tuple with the error message.
  """
  sync_client = None
  if depot == 'chromium' or depot == 'android-chrome':
    sync_client = 'gclient'
  elif depot == 'cros':
    sync_client = 'repo'

  revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)

  if not revisions_to_sync:
    return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)

  if not self.PerformPreSyncCleanup(revision, depot):
    return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)

  # NOTE(review): the success-tracking scaffolding around the sync loop was
  # elided in this chunk; reconstructed with guard clauses — confirm
  # against upstream.
  success = True
  if not self.opts.debug_ignore_sync:
    for r in revisions_to_sync:
      self.ChangeToDepotWorkingDirectory(r[0])

      if sync_client:
        self.PerformPreBuildCleanup()

      # If you're using gclient to sync, you need to specify the depot you
      # want so that all the dependencies sync properly as well.
      # ie. gclient sync src@<SHA1>
      current_revision = r[1]
      if sync_client == 'gclient':
        current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
            current_revision)
      if not self.source_control.SyncToRevision(current_revision,
          sync_client):
        success = False
        break

  if not success:
    return ('Failed to sync revision: [%s]' % str(revision),
        BUILD_RESULT_FAIL)

  if not self.RunPostSync(depot):
    return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)

  if skippable and self.ShouldSkipRevision(depot, revision):
    return ('Skipped revision: [%s]' % str(revision),
        BUILD_RESULT_SKIPPED)

  start_build_time = time.time()
  if not self.BuildCurrentRevision(depot):
    return ('Failed to build revision: [%s]' % str(revision),
        BUILD_RESULT_FAIL)

  after_build_time = time.time()
  results = self.RunPerformanceTestAndParseResults(command_to_run, metric)

  if results[1]:
    return results

  external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision(
      depot, revision)

  if external_revisions is not None:
    return (results[0], results[1], external_revisions,
        time.time() - after_build_time, time.time() -
        start_build_time)

  return ('Failed to parse DEPS file for external revisions.',
      BUILD_RESULT_FAIL)
def CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
  """Given known good and bad values, decide if the current_value passed
  or failed.

  Args:
    current_value: The value of the metric being checked.
    known_bad_value: The reference value for a "failed" run.
    known_good_value: The reference value for a "passed" run.

  Returns:
    True if the current_value is closer to the known_good_value than the
    known_bad_value.
  """
  mean = current_value['mean']
  good_distance = abs(mean - known_good_value['mean'])
  bad_distance = abs(mean - known_bad_value['mean'])
  return good_distance < bad_distance
1660 def _GetDepotDirectory(self
, depot_name
):
1661 if depot_name
== 'chromium':
1663 elif depot_name
== 'cros':
1664 return self
.cros_cwd
1665 elif depot_name
in DEPOT_NAMES
:
1666 return self
.depot_cwd
[depot_name
]
1668 assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
1669 ' was added without proper support?' %\
def ChangeToDepotWorkingDirectory(self, depot_name):
  """Given a depot, changes to the appropriate working directory.

  Args:
    depot_name: The name of the depot (see DEPOT_NAMES).
  """
  target_directory = self._GetDepotDirectory(depot_name)
  os.chdir(target_directory)
def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
  """Records the nearest bleeding_edge revisions for a V8 trunk range and
  warns when trunk revisions did not map directly.

  Args:
    min_revision_data: Data for the earliest revision in the bisect range.
    max_revision_data: Data for the latest revision in the bisect range.
  """
  r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'],
      search_forward=True)
  r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'],
      search_forward=False)
  min_revision_data['external']['v8_bleeding_edge'] = r1
  max_revision_data['external']['v8_bleeding_edge'] = r2

  if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
          min_revision_data['revision']) or
      not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
          max_revision_data['revision'])):
    # NOTE(review): the tail of this warning string was elided in this
    # chunk — confirm wording against upstream.
    self.warnings.append('Trunk revisions in V8 did not map directly to '
        'bleeding_edge. Attempted to expand the range to find V8 rolls which '
        'did map directly to bleeding_edge revisions, but results might not '
        'be valid.')
def _FindNextDepotToBisect(self, current_depot, current_revision,
    min_revision_data, max_revision_data):
  """Given the state of the bisect, decides which depot the script should
  dive into next (if any).

  Args:
    current_depot: Current depot being bisected.
    current_revision: Current revision synced to.
    min_revision_data: Data about the earliest revision in the bisect range.
    max_revision_data: Data about the latest revision in the bisect range.

  Returns:
    The depot to bisect next, or None.
  """
  external_depot = None
  for next_depot in DEPOT_NAMES:
    if 'platform' in DEPOT_DEPS_NAME[next_depot]:
      if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
        continue

    if not (DEPOT_DEPS_NAME[next_depot]["recurse"] and
        min_revision_data['depot'] in DEPOT_DEPS_NAME[next_depot]['from']):
      continue

    if current_depot == 'v8':
      # We grab the bleeding_edge info here rather than earlier because we
      # finally have the revision range. From that we can search forwards and
      # backwards to try to match trunk revisions to bleeding_edge.
      self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)

    if (min_revision_data['external'][next_depot] ==
        max_revision_data['external'][next_depot]):
      continue

    if (min_revision_data['external'][next_depot] and
        max_revision_data['external'][next_depot]):
      external_depot = next_depot
      break

  return external_depot
def PrepareToBisectOnDepot(self, current_depot, end_revision, start_revision,
    previous_depot, previous_revision):
  """Changes to the appropriate directory and gathers a list of revisions
  to bisect between |start_revision| and |end_revision|.

  Args:
    current_depot: The depot we want to bisect.
    end_revision: End of the revision range.
    start_revision: Start of the revision range.
    previous_depot: The depot we were previously bisecting.
    previous_revision: The last revision we synced to on |previous_depot|.

  Returns:
    A list containing the revisions between |start_revision| and
    |end_revision| inclusive.
  """
  # Change into working directory of external library to run
  # subsequent commands.
  self.ChangeToDepotWorkingDirectory(current_depot)

  # V8 (and possibly others) is merged in periodically. Bisecting
  # this directory directly won't give much good info.
  if 'custom_deps' in DEPOT_DEPS_NAME[current_depot]:
    config_path = os.path.join(self.src_cwd, '..')
    if bisect_utils.RunGClientAndCreateConfig(self.opts,
        DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
      return []
    if bisect_utils.RunGClient(
        ['sync', '--revision', previous_revision], cwd=self.src_cwd):
      return []

  if current_depot == 'v8_bleeding_edge':
    self.ChangeToDepotWorkingDirectory('chromium')

    # Swap the merged v8 checkout with bleeding_edge so we bisect the
    # latter; record cleanup commands to undo the swap afterwards.
    shutil.move('v8', 'v8.bak')
    shutil.move('v8_bleeding_edge', 'v8')

    self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
    self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])

    self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
    self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')

    self.ChangeToDepotWorkingDirectory(current_depot)

  depot_revision_list = self.GetRevisionList(current_depot,
      end_revision, start_revision)

  self.ChangeToDepotWorkingDirectory('chromium')

  return depot_revision_list
def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
  """Gathers reference values by running the performance tests on the
  known good and bad revisions.

  Args:
    good_rev: The last known good revision where the performance regression
      has not occurred yet.
    bad_rev: A revision where the performance regression has already occurred.
    cmd: The command to execute the performance test.
    metric: The metric being tested for regression.
    target_depot: The depot being bisected.

  Returns:
    A tuple with the results of building and running each revision.
  """
  # NOTE(review): the trailing call arguments were elided in this chunk;
  # reconstructed from SyncBuildAndRunRevision's signature.
  bad_run_results = self.SyncBuildAndRunRevision(bad_rev,
      target_depot, cmd, metric)

  good_run_results = None

  # Only bother with the good revision if the bad one succeeded.
  if not bad_run_results[1]:
    good_run_results = self.SyncBuildAndRunRevision(good_rev,
        target_depot, cmd, metric)

  return (bad_run_results, good_run_results)
def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
  """Adds new revisions to the revision_data dict and initializes them.

  Args:
    revisions: List of revisions to add.
    depot: Depot that's currently in use (src, webkit, etc...)
    sort: Sorting key for displaying revisions.
    revision_data: A dict to add the new revisions into. Existing revisions
      will have their sort keys offset.
  """
  num_depot_revisions = len(revisions)

  # Shift every existing entry that sorts after the insertion point so the
  # new revisions can slot in between.
  for _, v in revision_data.items():
    if v['sort'] > sort:
      v['sort'] += num_depot_revisions

  for i in range(num_depot_revisions):
    r = revisions[i]
    # NOTE(review): part of this initial field set was elided in this
    # chunk; the extra fields mirror how entries are read elsewhere in the
    # bisect loop — confirm against upstream.
    revision_data[r] = {'revision' : r,
                        'depot' : depot,
                        'value' : None,
                        'perf_time' : 0,
                        'build_time' : 0,
                        'external' : None,
                        'passed' : '?',
                        'sort' : i + sort + 1}
def PrintRevisionsToBisectMessage(self, revision_list, depot):
  """Prints the revision range about to be bisected, with buildbot
  annotations when enabled.

  Args:
    revision_list: The revisions being bisected (newest first).
    depot: The depot the revisions belong to.
  """
  if self.opts.output_buildbot_annotations:
    # [-1] replaces the original revision_list[len(revision_list)-1].
    step_name = 'Bisection Range: [%s - %s]' % (
        revision_list[-1], revision_list[0])
    bisect_utils.OutputAnnotationStepStart(step_name)

  print('')
  print('Revisions to bisect on [%s]:' % depot)
  for revision_id in revision_list:
    print(' -> %s' % (revision_id, ))
  print('')

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepClosed()
def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
  """Checks to see if changes to DEPS file occurred, and that the revision
  range also includes the change to .DEPS.git. If it doesn't, attempts to
  expand the revision range to include it.

  Args:
    bad_revision: First known bad revision.
    good_revision: Last known good revision.

  Returns:
    A tuple with the new bad and good revisions.
  """
  if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
    changes_to_deps = self.source_control.QueryFileRevisionHistory(
        'DEPS', good_revision, bad_revision)

    if changes_to_deps:
      # DEPS file was changed, search from the oldest change to DEPS file to
      # bad_revision to see if there are matching .DEPS.git changes.
      oldest_deps_change = changes_to_deps[-1]
      changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
          bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)

      if len(changes_to_deps) != len(changes_to_gitdeps):
        # Grab the timestamp of the last DEPS change
        cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
        output = CheckRunGit(cmd)
        commit_time = int(output)

        # Try looking for a commit that touches the .DEPS.git file in the
        # next 15 minutes after the DEPS file change.
        cmd = ['log', '--format=%H', '-1',
            '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
            'origin/master', bisect_utils.FILE_DEPS_GIT]
        output = CheckRunGit(cmd)
        output = output.strip()
        # NOTE(review): the branch structure around these warnings was
        # elided in this chunk — confirm against upstream.
        if output:
          self.warnings.append('Detected change to DEPS and modified '
              'revision range to include change to .DEPS.git')
          return (output, good_revision)
        else:
          self.warnings.append('Detected change to DEPS but couldn\'t find '
              'matching change to .DEPS.git')
  return (bad_revision, good_revision)
def CheckIfRevisionsInProperOrder(self, target_depot, good_revision,
    bad_revision):
  """Checks that |good_revision| is an earlier revision than |bad_revision|.

  Args:
    target_depot: The depot being bisected.
    good_revision: Number/tag of the known good revision.
    bad_revision: Number/tag of the known bad revision.

  Returns:
    True if the revisions are in the proper order (good earlier than bad).
  """
  if self.source_control.IsGit() and target_depot != 'cros':
    # Compare commit timestamps since git hashes have no ordering.
    cmd = ['log', '--format=%ct', '-1', good_revision]
    cwd = self._GetDepotDirectory(target_depot)

    output = CheckRunGit(cmd, cwd=cwd)
    good_commit_time = int(output)

    cmd = ['log', '--format=%ct', '-1', bad_revision]
    output = CheckRunGit(cmd, cwd=cwd)
    bad_commit_time = int(output)

    return good_commit_time <= bad_commit_time

  # Cros/svn use integers
  return int(good_revision) <= int(bad_revision)
def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
  """Given known good and bad revisions, run a binary search on all
  intermediate revisions to determine the CL where the performance regression
  occurred.

  Args:
    command_to_run: Specify the command to execute the performance test.
    good_revision_in: Number/tag of the known good revision.
    bad_revision_in: Number/tag of the known bad revision.
    metric: The performance metric to monitor.

  Returns:
    A dict with 2 members, 'revision_data' and 'error'. On success,
    'revision_data' will contain a dict mapping revision ids to
    data about that revision: 'passed' (1 passed, 0 failed, '?' skipped,
    'F' build failed), 'depot', 'external' (external library revisions for
    'src' revisions), and 'sort' (ordering key). If an error occurred, the
    'error' field will contain the message and 'revision_data' will be
    empty.
  """
  results = {'revision_data' : {},
             'error' : None}

  # Choose depot to bisect first
  target_depot = 'chromium'
  if self.opts.target_platform == 'cros':
    target_depot = 'cros'
  elif self.opts.target_platform == 'android-chrome':
    target_depot = 'android-chrome'

  self.ChangeToDepotWorkingDirectory(target_depot)

  # If they passed SVN CL's, etc... we can try match them to git SHA1's.
  # NOTE(review): the search-depth arguments were elided in this chunk —
  # confirm against upstream.
  bad_revision = self.source_control.ResolveToRevision(bad_revision_in,
      target_depot, 100)
  good_revision = self.source_control.ResolveToRevision(good_revision_in,
      target_depot, -100)

  if bad_revision is None:
    results['error'] = 'Could\'t resolve [%s] to SHA1.' % (bad_revision_in,)
    return results

  if good_revision is None:
    results['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in,)
    return results

  # Check that they didn't accidentally swap good and bad revisions.
  if not self.CheckIfRevisionsInProperOrder(
      target_depot, good_revision, bad_revision):
    results['error'] = 'bad_revision < good_revision, did you swap these '\
        'by mistake?'
    return results

  (bad_revision, good_revision) = self.NudgeRevisionsIfDEPSChange(
      bad_revision, good_revision)

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepStart('Gathering Revisions')

  print('Gathering revision range for bisection.')

  # Retrieve a list of revisions to do bisection on.
  src_revision_list = self.GetRevisionList(target_depot,
      bad_revision, good_revision)

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepClosed()

  if not src_revision_list:
    # Weren't able to sync and retrieve the revision range.
    results['error'] = 'An error occurred attempting to retrieve revision '\
        'range: [%s..%s]' % (good_revision, bad_revision)
    return results

  # revision_data will store information about a revision such as the
  # depot it came from, the webkit/V8 revision at that time,
  # performance timing, build state, etc...
  revision_data = results['revision_data']

  # revision_list is the list we're binary searching through at the moment.
  revision_list = []

  sort_key_ids = 0

  for current_revision_id in src_revision_list:
    sort_key_ids += 1
    # NOTE(review): part of this initial field set was elided in this
    # chunk; reconstructed to match how the loop below reads the entries.
    revision_data[current_revision_id] = {'value' : None,
                                          'passed' : '?',
                                          'depot' : target_depot,
                                          'external' : None,
                                          'perf_time' : 0,
                                          'build_time' : 0,
                                          'sort' : sort_key_ids}
    revision_list.append(current_revision_id)

  min_revision = 0
  max_revision = len(revision_list) - 1

  self.PrintRevisionsToBisectMessage(revision_list, target_depot)

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')

  print('Gathering reference values for bisection.')

  # Perform the performance tests on the good and bad revisions, to get
  # reference values.
  (bad_results, good_results) = self.GatherReferenceValues(good_revision,
      bad_revision, command_to_run, metric, target_depot)

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepClosed()

  if bad_results[1]:
    results['error'] = ('An error occurred while building and running '
        'the \'bad\' reference value. The bisect cannot continue without '
        'a working \'bad\' revision to start from.\n\nError: %s' %
        bad_results[0])
    return results

  if good_results[1]:
    results['error'] = ('An error occurred while building and running '
        'the \'good\' reference value. The bisect cannot continue without '
        'a working \'good\' revision to start from.\n\nError: %s' %
        good_results[0])
    return results

  # We need these reference values to determine if later runs should be
  # classified as pass or fail.
  known_bad_value = bad_results[0]
  known_good_value = good_results[0]

  # Can just mark the good and bad revisions explicitly here since we
  # already know the results.
  bad_revision_data = revision_data[revision_list[0]]
  bad_revision_data['external'] = bad_results[2]
  bad_revision_data['perf_time'] = bad_results[3]
  bad_revision_data['build_time'] = bad_results[4]
  bad_revision_data['passed'] = False
  bad_revision_data['value'] = known_bad_value

  good_revision_data = revision_data[revision_list[max_revision]]
  good_revision_data['external'] = good_results[2]
  good_revision_data['perf_time'] = good_results[3]
  good_revision_data['build_time'] = good_results[4]
  good_revision_data['passed'] = True
  good_revision_data['value'] = known_good_value

  next_revision_depot = target_depot

  while True:
    if not revision_list:
      break

    min_revision_data = revision_data[revision_list[min_revision]]
    max_revision_data = revision_data[revision_list[max_revision]]

    if max_revision - min_revision <= 1:
      current_depot = min_revision_data['depot']
      if min_revision_data['passed'] == '?':
        next_revision_index = min_revision
      elif max_revision_data['passed'] == '?':
        next_revision_index = max_revision
      elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
        previous_revision = revision_list[min_revision]
        # If there were changes to any of the external libraries we track,
        # should bisect the changes there as well.
        external_depot = self._FindNextDepotToBisect(current_depot,
            previous_revision, min_revision_data, max_revision_data)

        # If there was no change in any of the external depots, the search
        # is over.
        if not external_depot:
          if current_depot == 'v8':
            self.warnings.append('Unfortunately, V8 bisection couldn\'t '
                'continue any further. The script can only bisect into '
                'V8\'s bleeding_edge repository if both the current and '
                'previous revisions in trunk map directly to revisions in '
                'bleeding_edge.')
          break

        earliest_revision = max_revision_data['external'][external_depot]
        latest_revision = min_revision_data['external'][external_depot]

        new_revision_list = self.PrepareToBisectOnDepot(external_depot,
            latest_revision,
            earliest_revision,
            next_revision_depot,
            previous_revision)

        if not new_revision_list:
          results['error'] = 'An error occurred attempting to retrieve'\
              ' revision range: [%s..%s]' %\
              (earliest_revision, latest_revision)
          return results

        self.AddRevisionsIntoRevisionData(new_revision_list,
            external_depot,
            min_revision_data['sort'],
            revision_data)

        # Reset the bisection and perform it on the newly inserted
        # revisions.
        revision_list = new_revision_list
        min_revision = 0
        max_revision = len(revision_list) - 1
        sort_key_ids += len(revision_list)

        print('Regression in metric:%s appears to be the result of changes'
            ' in [%s].' % (metric, external_depot))

        self.PrintRevisionsToBisectMessage(revision_list, external_depot)

        continue
      else:
        break
    else:
      next_revision_index = int((max_revision - min_revision) / 2) +\
          min_revision

    next_revision_id = revision_list[next_revision_index]
    next_revision_data = revision_data[next_revision_id]
    next_revision_depot = next_revision_data['depot']

    self.ChangeToDepotWorkingDirectory(next_revision_depot)

    if self.opts.output_buildbot_annotations:
      step_name = 'Working on [%s]' % next_revision_id
      bisect_utils.OutputAnnotationStepStart(step_name)

    print('Working on revision: [%s]' % next_revision_id)

    run_results = self.SyncBuildAndRunRevision(next_revision_id,
        next_revision_depot,
        command_to_run,
        metric, skippable=True)

    # If the build is successful, check whether or not the metric
    # regressed at this revision.
    if not run_results[1]:
      if len(run_results) > 2:
        next_revision_data['external'] = run_results[2]
        next_revision_data['perf_time'] = run_results[3]
        next_revision_data['build_time'] = run_results[4]

      passed_regression = self.CheckIfRunPassed(run_results[0],
          known_good_value,
          known_bad_value)

      next_revision_data['passed'] = passed_regression
      next_revision_data['value'] = run_results[0]

      if passed_regression:
        max_revision = next_revision_index
      else:
        min_revision = next_revision_index
    else:
      if run_results[1] == BUILD_RESULT_SKIPPED:
        next_revision_data['passed'] = 'Skipped'
      elif run_results[1] == BUILD_RESULT_FAIL:
        next_revision_data['passed'] = 'Build Failed'

      print(run_results[0])

      # If the build is broken, remove it and redo search.
      revision_list.pop(next_revision_index)

      # NOTE(review): the index adjustment after the pop was elided in this
      # chunk — confirm against upstream.
      max_revision -= 1

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()

  return results
2248 def _PrintConfidence(self
, results_dict
):
2249 # The perf dashboard specifically looks for the string
2250 # "Confidence in Bisection Results: 100%" to decide whether or not
2251 # to cc the author(s). If you change this, please update the perf
2252 # dashboard as well.
2253 print 'Confidence in Bisection Results: %d%%' % results_dict
['confidence']
def _PrintBanner(self, results_dict):
  """Prints the regression-reproduced banner plus the regression size,
  error, metric name, and confidence.
  """
  print('')
  print(" __o_\___ Aw Snap! We hit a speed bump!")
  print("=-O----O-'__.~.___________________________________")
  print('')
  print('Bisect reproduced a %.02f%% (+-%.02f%%) change in the %s metric.' % (
      results_dict['regression_size'], results_dict['regression_std_err'],
      '/'.join(self.opts.metric)))
  self._PrintConfidence(results_dict)
def _PrintFailedBanner(self, results_dict):
  """Prints a banner indicating no change in the metric was reproduced."""
  print('')
  print('Bisect could not reproduce a change in the '
      '%s/%s metric.' % (self.opts.metric[0], self.opts.metric[1]))
  print('')
  self._PrintConfidence(results_dict)
  def _PrintRevisionInfo(self, cl, info, depot=None):
    """Prints subject/author/link information for a culprit revision.

    Args:
      cl: The commit identifier (git hash or revision) to print.
      info: Dict with 'subject', 'author', 'email', 'body' and 'date' keys
          (as returned by source_control.QueryRevisionInfo).
      depot: Optional depot name, used to build a viewvc link when
          DEPOT_DEPS_NAME[depot] has a 'viewvc' entry.
    """
    # The perf dashboard specifically looks for the string
    # "Author : " to parse out who to cc on a bug. If you change the
    # formatting here, please update the perf dashboard as well.
    print 'Subject : %s' % info['subject']
    print 'Author : %s' % info['author']
    if not info['email'].startswith(info['author']):
      print 'Email : %s' % info['email']
    if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'):
      # NOTE(review): a `try:` header appears to have been dropped from this
      # copy — the "Failed to parse" print below was presumably the body of
      # the matching except handler (svn_line[0] can raise IndexError when no
      # git-svn-id line exists). TODO: restore from upstream.
      # Format is "git-svn-id: svn://....@123456 <other data>"
      svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i]
      svn_revision = svn_line[0].split('@')
      svn_revision = svn_revision[1].split(' ')[0]
      print 'Link : %s' % DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision
      # NOTE(review): except-handler lines dropped here — TODO restore.
      print 'Failed to parse svn revision from body:'
    print 'Commit : %s' % cl
    print 'Date : %s' % info['date']
  def _PrintTestedCommitsTable(self, revision_data_sorted,
      first_working_revision, last_broken_revision, confidence):
    """Prints a table of tested commits with mean, std. error and state.

    Args:
      revision_data_sorted: List of (revision, data) pairs in bisect order.
      first_working_revision: The first revision that passed.
      last_broken_revision: The last revision that failed.
      confidence: The bisect confidence percentage.

    NOTE(review): several lines were dropped from this copy — the branches
    assigning state_str for non-suspect ("Good"/"Bad") commits and a
    confidence check are missing, so state_str can be referenced before
    assignment as written. TODO: restore from upstream.
    """
    print 'Tested commits:'
    print ' %20s %40s %12s %14s %13s' % ('Depot'.center(20, ' '),
        'Commit SHA'.center(40, ' '), 'Mean'.center(12, ' '),
        'Std. Error'.center(14, ' '), 'State'.center(13, ' '))
    for current_id, current_data in revision_data_sorted:
      if current_data['value']:
        if (current_id == last_broken_revision or
            current_id == first_working_revision):
          # If confidence is too low, don't add this empty line since it's
          # used to put focus on a suspected CL.
          state_str = 'Suspected CL'
        # If confidence is too low, don't bother outputting good/bad.
        state_str = state_str.center(13, ' ')
        std_error = ('+-%.02f' %
            current_data['value']['std_err']).center(14, ' ')
        mean = ('%.02f' % current_data['value']['mean']).center(12, ' ')
        print ' %20s %40s %12s %14s %13s' % (
            current_data['depot'].center(20, ' '), current_id, mean,
            std_error, state_str)
  def _PrintReproSteps(self):
    """Prints the command a developer can run to reproduce the test locally."""
    print 'To reproduce locally:'
    print '$ ' + self.opts.command
    if bisect_utils.IsTelemetryCommand(self.opts.command):
      # NOTE(review): a line (likely a blank `print`) appears to have been
      # dropped here — confirm against upstream.
      print 'Also consider passing --profiler=list to see available profilers.'
2341 def _PrintOtherRegressions(self
, other_regressions
, revision_data
):
2343 print 'Other regressions may have occurred:'
2344 for regression
in other_regressions
:
2345 current_id
, previous_id
, percent_change
, deviations
= regression
2346 current_data
= revision_data
[current_id
]
2347 previous_data
= revision_data
[previous_id
]
2349 if deviations
is None:
2352 deviations
= '%.2f' % deviations
2354 if percent_change
is None:
2357 print ' %8s %s [%.2f%%, %s x std.dev]' % (
2358 previous_data
['depot'], previous_id
, 100 * percent_change
, deviations
)
2359 print ' %8s %s' % (current_data
['depot'], current_id
)
2362 def _PrintStepTime(self
, revision_data_sorted
):
2363 step_perf_time_avg
= 0.0
2364 step_build_time_avg
= 0.0
2366 for _
, current_data
in revision_data_sorted
:
2367 step_perf_time_avg
+= current_data
['perf_time']
2368 step_build_time_avg
+= current_data
['build_time']
2371 step_perf_time_avg
= step_perf_time_avg
/ step_count
2372 step_build_time_avg
= step_build_time_avg
/ step_count
2374 print 'Average build time : %s' % datetime
.timedelta(
2375 seconds
=int(step_build_time_avg
))
2376 print 'Average test time : %s' % datetime
.timedelta(
2377 seconds
=int(step_perf_time_avg
))
2379 def _PrintWarnings(self
):
2380 if not self
.warnings
:
2384 for w
in self
.warnings
:
2387 def _GetResultsDict(self
, revision_data
, revision_data_sorted
):
2388 # Find range where it possibly broke.
2389 first_working_revision
= None
2390 first_working_revision_index
= -1
2391 last_broken_revision
= None
2392 last_broken_revision_index
= -1
2394 for i
in xrange(len(revision_data_sorted
)):
2395 k
, v
= revision_data_sorted
[i
]
2396 if v
['passed'] == 1:
2397 if not first_working_revision
:
2398 first_working_revision
= k
2399 first_working_revision_index
= i
2402 last_broken_revision
= k
2403 last_broken_revision_index
= i
2405 if last_broken_revision
!= None and first_working_revision
!= None:
2406 bounds_broken
= [revision_data
[last_broken_revision
]['value']['mean'],
2407 revision_data
[last_broken_revision
]['value']['mean']]
2409 for i
in xrange(0, last_broken_revision_index
+ 1):
2410 if revision_data_sorted
[i
][1]['value']:
2411 bounds_broken
[0] = min(bounds_broken
[0],
2412 revision_data_sorted
[i
][1]['value']['mean'])
2413 bounds_broken
[1] = max(bounds_broken
[1],
2414 revision_data_sorted
[i
][1]['value']['mean'])
2415 broken_mean
.extend(revision_data_sorted
[i
][1]['value']['values'])
2417 bounds_working
= [revision_data
[first_working_revision
]['value']['mean'],
2418 revision_data
[first_working_revision
]['value']['mean']]
2420 for i
in xrange(first_working_revision_index
, len(revision_data_sorted
)):
2421 if revision_data_sorted
[i
][1]['value']:
2422 bounds_working
[0] = min(bounds_working
[0],
2423 revision_data_sorted
[i
][1]['value']['mean'])
2424 bounds_working
[1] = max(bounds_working
[1],
2425 revision_data_sorted
[i
][1]['value']['mean'])
2426 working_mean
.extend(revision_data_sorted
[i
][1]['value']['values'])
2428 # Calculate the approximate size of the regression
2429 mean_of_bad_runs
= CalculateTruncatedMean(broken_mean
, 0.0)
2430 mean_of_good_runs
= CalculateTruncatedMean(working_mean
, 0.0)
2432 regression_size
= math
.fabs(max(mean_of_good_runs
, mean_of_bad_runs
) /
2433 max(0.0001, min(mean_of_good_runs
, mean_of_bad_runs
))) * 100.0 - 100.0
2435 regression_std_err
= math
.fabs(CalculatePooledStandardError(
2436 [working_mean
, broken_mean
]) /
2437 max(0.0001, min(mean_of_good_runs
, mean_of_bad_runs
))) * 100.0
2439 # Give a "confidence" in the bisect. At the moment we use how distinct the
2440 # values are before and after the last broken revision, and how noisy the
2442 dist_between_groups
= min(math
.fabs(bounds_broken
[1] - bounds_working
[0]),
2443 math
.fabs(bounds_broken
[0] - bounds_working
[1]))
2444 len_working_group
= CalculateStandardDeviation(working_mean
)
2445 len_broken_group
= CalculateStandardDeviation(broken_mean
)
2447 confidence
= (dist_between_groups
/ (
2448 max(0.0001, (len_broken_group
+ len_working_group
))))
2449 confidence
= int(min(1.0, max(confidence
, 0.0)) * 100.0)
2451 culprit_revisions
= []
2454 self
.ChangeToDepotWorkingDirectory(
2455 revision_data
[last_broken_revision
]['depot'])
2457 if revision_data
[last_broken_revision
]['depot'] == 'cros':
2458 # Want to get a list of all the commits and what depots they belong
2459 # to so that we can grab info about each.
2460 cmd
= ['repo', 'forall', '-c',
2461 'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
2462 last_broken_revision
, first_working_revision
+ 1)]
2463 (output
, return_code
) = RunProcessAndRetrieveOutput(cmd
)
2466 assert not return_code
, 'An error occurred while running'\
2467 ' "%s"' % ' '.join(cmd
)
2470 for l
in output
.split('\n'):
2472 # Output will be in form:
2474 # /path_to_other_depot
2482 contents
= l
.split(' ')
2483 if len(contents
) > 1:
2484 changes
.append([last_depot
, contents
[0]])
2487 info
= self
.source_control
.QueryRevisionInfo(c
[1])
2488 culprit_revisions
.append((c
[1], info
, None))
2490 for i
in xrange(last_broken_revision_index
, len(revision_data_sorted
)):
2491 k
, v
= revision_data_sorted
[i
]
2492 if k
== first_working_revision
:
2494 self
.ChangeToDepotWorkingDirectory(v
['depot'])
2495 info
= self
.source_control
.QueryRevisionInfo(k
)
2496 culprit_revisions
.append((k
, info
, v
['depot']))
2499 # Check for any other possible regression ranges
2500 good_std_dev
= revision_data
[first_working_revision
]['value']['std_err']
2501 good_mean
= revision_data
[first_working_revision
]['value']['mean']
2502 bad_mean
= revision_data
[last_broken_revision
]['value']['mean']
2503 prev_revision_data
= revision_data_sorted
[0][1]
2504 prev_revision_id
= revision_data_sorted
[0][0]
2505 other_regressions
= []
2506 for current_id
, current_data
in revision_data_sorted
:
2507 if current_data
['value']:
2508 prev_mean
= prev_revision_data
['value']['mean']
2509 cur_mean
= current_data
['value']['mean']
2512 deviations
= math
.fabs(prev_mean
- cur_mean
) / good_std_dev
2517 percent_change
= (prev_mean
- cur_mean
) / good_mean
2519 # If the "good" valuse are supposed to be higher than the "bad"
2520 # values (ie. scores), flip the sign of the percent change so that
2521 # a positive value always represents a regression.
2522 if bad_mean
< good_mean
:
2523 percent_change
*= -1.0
2525 percent_change
= None
2527 if deviations
>= 1.5 or percent_change
> 0.01:
2528 if current_id
!= first_working_revision
:
2529 other_regressions
.append(
2530 [current_id
, prev_revision_id
, percent_change
, deviations
])
2531 prev_revision_data
= current_data
2532 prev_revision_id
= current_id
2534 # Check for warnings:
2535 if len(culprit_revisions
) > 1:
2536 self
.warnings
.append('Due to build errors, regression range could '
2537 'not be narrowed down to a single commit.')
2538 if self
.opts
.repeat_test_count
== 1:
2539 self
.warnings
.append('Tests were only set to run once. This may '
2540 'be insufficient to get meaningful results.')
2541 if confidence
< 100:
2543 self
.warnings
.append(
2544 'Confidence is less than 100%. There could be other candidates for '
2545 'this regression. Try bisecting again with increased repeat_count '
2546 'or on a sub-metric that shows the regression more clearly.')
2548 self
.warnings
.append(
2549 'Confidence is 0%. Try bisecting again on another platform, with '
2550 'increased repeat_count or on a sub-metric that shows the regression '
2554 'first_working_revision': first_working_revision
,
2555 'last_broken_revision': last_broken_revision
,
2556 'culprit_revisions': culprit_revisions
,
2557 'other_regressions': other_regressions
,
2558 'regression_size': regression_size
,
2559 'regression_std_err': regression_std_err
,
2560 'confidence': confidence
,
2563 def FormatAndPrintResults(self
, bisect_results
):
2564 """Prints the results from a bisection run in a readable format.
2567 bisect_results: The results from a bisection test run.
2569 revision_data
= bisect_results
['revision_data']
2570 revision_data_sorted
= sorted(revision_data
.iteritems(),
2571 key
= lambda x
: x
[1]['sort'])
2572 results_dict
= self
._GetResultsDict
(revision_data
, revision_data_sorted
)
2574 if self
.opts
.output_buildbot_annotations
:
2575 bisect_utils
.OutputAnnotationStepStart('Build Status Per Revision')
2578 print 'Full results of bisection:'
2579 for current_id
, current_data
in revision_data_sorted
:
2580 build_status
= current_data
['passed']
2582 if type(build_status
) is bool:
2584 build_status
= 'Good'
2586 build_status
= 'Bad'
2588 print ' %20s %40s %s' % (current_data
['depot'],
2589 current_id
, build_status
)
2592 if self
.opts
.output_buildbot_annotations
:
2593 bisect_utils
.OutputAnnotationStepClosed()
2594 # The perf dashboard scrapes the "results" step in order to comment on
2595 # bugs. If you change this, please update the perf dashboard as well.
2596 bisect_utils
.OutputAnnotationStepStart('Results')
2598 if results_dict
['culprit_revisions'] and results_dict
['confidence']:
2599 self
._PrintBanner
(results_dict
)
2600 for culprit
in results_dict
['culprit_revisions']:
2601 cl
, info
, depot
= culprit
2602 self
._PrintRevisionInfo
(cl
, info
, depot
)
2603 self
._PrintReproSteps
()
2604 if results_dict
['other_regressions']:
2605 self
._PrintOtherRegressions
(results_dict
['other_regressions'],
2608 self
._PrintFailedBanner
(results_dict
)
2609 self
._PrintReproSteps
()
2611 self
._PrintTestedCommitsTable
(revision_data_sorted
,
2612 results_dict
['first_working_revision'],
2613 results_dict
['last_broken_revision'],
2614 results_dict
['confidence'])
2615 self
._PrintStepTime
(revision_data_sorted
)
2616 self
._PrintWarnings
()
2618 if self
.opts
.output_buildbot_annotations
:
2619 bisect_utils
.OutputAnnotationStepClosed()
def DetermineAndCreateSourceControl(opts):
  """Attempts to determine the underlying source control workflow and returns
  a SourceControl object.

  Args:
    opts: The options parsed from the command line.

  Returns:
    An instance of a SourceControl object, or None if the current workflow
    is unsupported (only git work trees are recognized; falls through to an
    implicit None otherwise).
  """
  (output, return_code) = RunGit(['rev-parse', '--is-inside-work-tree'])

  if output.strip() == 'true':
    return GitSourceControl(opts)
def IsPlatformSupported(opts):
  """Checks that this platform and build system are supported.

  Args:
    opts: The options parsed from the command line.

  Returns:
    True if the platform and build system are supported.
  """
  # Haven't tested the script out on any other platforms yet.
  supported_platforms = ['posix', 'nt']
  return os.name in supported_platforms
def RmTreeAndMkDir(path_to_dir):
  """Removes the directory tree specified, and then creates an empty
  directory in the same location.

  Args:
    path_to_dir: Path to the directory tree.

  Returns:
    True if successful, False if an error occurred.

  NOTE(review): the try/except headers and return statements were dropped
  from this copy of the file; they have been reconstructed from the visible
  errno checks — confirm against upstream.
  """
  try:
    shutil.rmtree(path_to_dir)
  except OSError as e:
    # A missing directory is fine; anything else is a real failure.
    if e.errno != errno.ENOENT:
      return False

  try:
    os.makedirs(path_to_dir)
  except OSError as e:
    # An already-existing directory is fine; anything else is a failure.
    if e.errno != errno.EEXIST:
      return False

  return True
def RemoveBuildFiles():
  """Removes build files from previous runs.

  Returns:
    True if both the out/Release and build/Release trees were recreated,
    False otherwise.

  NOTE(review): the return statements were dropped from this copy of the
  file and have been reconstructed — confirm against upstream.
  """
  if RmTreeAndMkDir(os.path.join('out', 'Release')):
    if RmTreeAndMkDir(os.path.join('build', 'Release')):
      return True
  return False
class BisectOptions(object):
  """Options to be used when running bisection."""

  def __init__(self):
    # NOTE(review): the `def __init__(self):` line was dropped from this
    # copy of the file and has been restored; some attribute
    # initializations (e.g. `command`, `metric`) also appear to be missing
    # — confirm against upstream.
    super(BisectOptions, self).__init__()

    self.target_platform = 'chromium'
    self.build_preference = None
    self.good_revision = None
    self.bad_revision = None
    self.use_goma = None
    self.cros_board = None
    self.cros_remote_ip = None
    self.repeat_test_count = 20
    self.truncate_percent = 25
    self.max_time_minutes = 20
    self.output_buildbot_annotations = None
    self.no_custom_deps = False
    self.working_directory = None
    self.extra_src = None
    self.debug_ignore_build = None
    self.debug_ignore_sync = None
    self.debug_ignore_perf_test = None
2712 def _CreateCommandLineParser(self
):
2713 """Creates a parser with bisect options.
2716 An instance of optparse.OptionParser.
2718 usage
= ('%prog [options] [-- chromium-options]\n'
2719 'Perform binary search on revision history to find a minimal '
2720 'range of revisions where a peformance metric regressed.\n')
2722 parser
= optparse
.OptionParser(usage
=usage
)
2724 group
= optparse
.OptionGroup(parser
, 'Bisect options')
2725 group
.add_option('-c', '--command',
2727 help='A command to execute your performance test at' +
2728 ' each point in the bisection.')
2729 group
.add_option('-b', '--bad_revision',
2731 help='A bad revision to start bisection. ' +
2732 'Must be later than good revision. May be either a git' +
2733 ' or svn revision.')
2734 group
.add_option('-g', '--good_revision',
2736 help='A revision to start bisection where performance' +
2737 ' test is known to pass. Must be earlier than the ' +
2738 'bad revision. May be either a git or svn revision.')
2739 group
.add_option('-m', '--metric',
2741 help='The desired metric to bisect on. For example ' +
2742 '"vm_rss_final_b/vm_rss_f_b"')
2743 group
.add_option('-r', '--repeat_test_count',
2746 help='The number of times to repeat the performance '
2747 'test. Values will be clamped to range [1, 100]. '
2748 'Default value is 20.')
2749 group
.add_option('--max_time_minutes',
2752 help='The maximum time (in minutes) to take running the '
2753 'performance tests. The script will run the performance '
2754 'tests according to --repeat_test_count, so long as it '
2755 'doesn\'t exceed --max_time_minutes. Values will be '
2756 'clamped to range [1, 60].'
2757 'Default value is 20.')
2758 group
.add_option('-t', '--truncate_percent',
2761 help='The highest/lowest % are discarded to form a '
2762 'truncated mean. Values will be clamped to range [0, '
2763 '25]. Default value is 25 (highest/lowest 25% will be '
2765 parser
.add_option_group(group
)
2767 group
= optparse
.OptionGroup(parser
, 'Build options')
2768 group
.add_option('-w', '--working_directory',
2770 help='Path to the working directory where the script '
2771 'will do an initial checkout of the chromium depot. The '
2772 'files will be placed in a subdirectory "bisect" under '
2773 'working_directory and that will be used to perform the '
2774 'bisection. This parameter is optional, if it is not '
2775 'supplied, the script will work from the current depot.')
2776 group
.add_option('--build_preference',
2778 choices
=['msvs', 'ninja', 'make'],
2779 help='The preferred build system to use. On linux/mac '
2780 'the options are make/ninja. On Windows, the options '
2782 group
.add_option('--target_platform',
2784 choices
=['chromium', 'cros', 'android', 'android-chrome'],
2786 help='The target platform. Choices are "chromium" '
2787 '(current platform), "cros", or "android". If you '
2788 'specify something other than "chromium", you must be '
2789 'properly set up to build that platform.')
2790 group
.add_option('--no_custom_deps',
2791 dest
='no_custom_deps',
2792 action
="store_true",
2794 help='Run the script with custom_deps or not.')
2795 group
.add_option('--extra_src',
2797 help='Path to a script which can be used to modify '
2798 'the bisect script\'s behavior.')
2799 group
.add_option('--cros_board',
2801 help='The cros board type to build.')
2802 group
.add_option('--cros_remote_ip',
2804 help='The remote machine to image to.')
2805 group
.add_option('--use_goma',
2806 action
="store_true",
2807 help='Add a bunch of extra threads for goma.')
2808 group
.add_option('--output_buildbot_annotations',
2809 action
="store_true",
2810 help='Add extra annotation output for buildbot.')
2811 parser
.add_option_group(group
)
2813 group
= optparse
.OptionGroup(parser
, 'Debug options')
2814 group
.add_option('--debug_ignore_build',
2815 action
="store_true",
2816 help='DEBUG: Don\'t perform builds.')
2817 group
.add_option('--debug_ignore_sync',
2818 action
="store_true",
2819 help='DEBUG: Don\'t perform syncs.')
2820 group
.add_option('--debug_ignore_perf_test',
2821 action
="store_true",
2822 help='DEBUG: Don\'t perform performance tests.')
2823 parser
.add_option_group(group
)
2828 def ParseCommandLine(self
):
2829 """Parses the command line for bisect options."""
2830 parser
= self
._CreateCommandLineParser
()
2831 (opts
, args
) = parser
.parse_args()
2834 if not opts
.command
:
2835 raise RuntimeError('missing required parameter: --command')
2837 if not opts
.good_revision
:
2838 raise RuntimeError('missing required parameter: --good_revision')
2840 if not opts
.bad_revision
:
2841 raise RuntimeError('missing required parameter: --bad_revision')
2844 raise RuntimeError('missing required parameter: --metric')
2846 if opts
.target_platform
== 'cros':
2847 # Run sudo up front to make sure credentials are cached for later.
2848 print 'Sudo is required to build cros:'
2850 RunProcess(['sudo', 'true'])
2852 if not opts
.cros_board
:
2853 raise RuntimeError('missing required parameter: --cros_board')
2855 if not opts
.cros_remote_ip
:
2856 raise RuntimeError('missing required parameter: --cros_remote_ip')
2858 if not opts
.working_directory
:
2859 raise RuntimeError('missing required parameter: --working_directory')
2861 metric_values
= opts
.metric
.split('/')
2862 if len(metric_values
) != 2:
2863 raise RuntimeError("Invalid metric specified: [%s]" % opts
.metric
)
2865 opts
.metric
= metric_values
2866 opts
.repeat_test_count
= min(max(opts
.repeat_test_count
, 1), 100)
2867 opts
.max_time_minutes
= min(max(opts
.max_time_minutes
, 1), 60)
2868 opts
.truncate_percent
= min(max(opts
.truncate_percent
, 0), 25)
2869 opts
.truncate_percent
= opts
.truncate_percent
/ 100.0
2871 for k
, v
in opts
.__dict
__.iteritems():
2872 assert hasattr(self
, k
), "Invalid %s attribute in BisectOptions." % k
2874 except RuntimeError, e
:
2875 output_string
= StringIO
.StringIO()
2876 parser
.print_help(file=output_string
)
2877 error_message
= '%s\n\n%s' % (e
.message
, output_string
.getvalue())
2878 output_string
.close()
2879 raise RuntimeError(error_message
)
2882 def FromDict(values
):
2883 """Creates an instance of BisectOptions with the values parsed from a
2887 values: a dict containing options to set.
2890 An instance of BisectOptions.
2892 opts
= BisectOptions()
2894 for k
, v
in values
.iteritems():
2895 assert hasattr(opts
, k
), 'Invalid %s attribute in '\
2896 'BisectOptions.' % k
2899 metric_values
= opts
.metric
.split('/')
2900 if len(metric_values
) != 2:
2901 raise RuntimeError("Invalid metric specified: [%s]" % opts
.metric
)
2903 opts
.metric
= metric_values
2904 opts
.repeat_test_count
= min(max(opts
.repeat_test_count
, 1), 100)
2905 opts
.max_time_minutes
= min(max(opts
.max_time_minutes
, 1), 60)
2906 opts
.truncate_percent
= min(max(opts
.truncate_percent
, 0), 25)
2907 opts
.truncate_percent
= opts
.truncate_percent
/ 100.0
2915 opts
= BisectOptions()
2916 parse_results
= opts
.ParseCommandLine()
2919 extra_src
= bisect_utils
.LoadExtraSrc(opts
.extra_src
)
2921 raise RuntimeError("Invalid or missing --extra_src.")
2922 _AddAdditionalDepotInfo(extra_src
.GetAdditionalDepotInfo())
2924 if opts
.working_directory
:
2925 custom_deps
= bisect_utils
.DEFAULT_GCLIENT_CUSTOM_DEPS
2926 if opts
.no_custom_deps
:
2928 bisect_utils
.CreateBisectDirectoryAndSetupDepot(opts
, custom_deps
)
2930 os
.chdir(os
.path
.join(os
.getcwd(), 'src'))
2932 if not RemoveBuildFiles():
2933 raise RuntimeError('Something went wrong removing the build files.')
2935 if not IsPlatformSupported(opts
):
2936 raise RuntimeError("Sorry, this platform isn't supported yet.")
2938 # Check what source control method they're using. Only support git workflow
2940 source_control
= DetermineAndCreateSourceControl(opts
)
2942 if not source_control
:
2943 raise RuntimeError("Sorry, only the git workflow is supported at the "
2946 # gClient sync seems to fail if you're not in master branch.
2947 if (not source_control
.IsInProperBranch() and
2948 not opts
.debug_ignore_sync
and
2949 not opts
.working_directory
):
2950 raise RuntimeError("You must switch to master branch to run bisection.")
2952 bisect_test
= BisectPerformanceMetrics(source_control
, opts
)
2954 bisect_results
= bisect_test
.Run(opts
.command
,
2958 if bisect_results
['error']:
2959 raise RuntimeError(bisect_results
['error'])
2960 bisect_test
.FormatAndPrintResults(bisect_results
)
2963 bisect_test
.PerformCleanup()
2964 except RuntimeError, e
:
2965 if opts
.output_buildbot_annotations
:
2966 # The perf dashboard scrapes the "results" step in order to comment on
2967 # bugs. If you change this, please update the perf dashboard as well.
2968 bisect_utils
.OutputAnnotationStepStart('Results')
2969 print 'Error: %s' % e
.message
2970 if opts
.output_buildbot_annotations
:
2971 bisect_utils
.OutputAnnotationStepClosed()
2974 if __name__
== '__main__':