2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
further bisect changes to those depots and attempt to narrow down the revision
range.
17 An example usage (using svn cl's):
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
28 An example usage (using git hashes):
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
# Make the bundled telemetry checkout importable, then import cloud_storage.
# NOTE: the import must come AFTER the sys.path.append -- importing
# telemetry.page before amending sys.path would raise ImportError, so the
# duplicate pre-append import has been removed.
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), 'telemetry'))
from telemetry.page import cloud_storage
# The additional repositories that might need to be bisected.
# If the repository has any dependent repositories (such as skia/src needs
# skia/include and skia/gyp to be updated), specify them in the 'depends'
# so that they're synced appropriately.
# Format:
# src: path to the working directory.
# recurse: True if this repository will get bisected.
# depends: A list of other repositories that are actually part of the same
#   repository in svn.
# svn: Needed for git workflow to resolve hashes to svn revisions.
# from: Parent depot that must be bisected before this is bisected.
78 "from" : ['cros', 'android-chrome'],
79 'viewvc': 'http://src.chromium.org/viewvc/chrome?view=revision&revision='
82 "src" : "src/third_party/WebKit",
85 "from" : ['chromium'],
86 'viewvc': 'http://src.chromium.org/viewvc/blink?view=revision&revision='
89 "src" : "src/third_party/angle",
90 "src_old" : "src/third_party/angle_dx11",
93 "from" : ['chromium'],
100 "from" : ['chromium'],
101 "custom_deps": bisect_utils
.GCLIENT_CUSTOM_DEPS_V8
,
102 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
104 'v8_bleeding_edge' : {
105 "src" : "src/v8_bleeding_edge",
108 "svn": "https://v8.googlecode.com/svn/branches/bleeding_edge",
110 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
113 "src" : "src/third_party/skia/src",
115 "svn" : "http://skia.googlecode.com/svn/trunk/src",
116 "depends" : ['skia/include', 'skia/gyp'],
117 "from" : ['chromium'],
118 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
121 "src" : "src/third_party/skia/include",
123 "svn" : "http://skia.googlecode.com/svn/trunk/include",
125 "from" : ['chromium'],
126 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
129 "src" : "src/third_party/skia/gyp",
131 "svn" : "http://skia.googlecode.com/svn/trunk/gyp",
133 "from" : ['chromium'],
134 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
# Pattern used to locate the cros version bump commit message.
CROS_VERSION_PATTERN = 'new version number from %s'
CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
# TODO(review): the key filenames below were truncated in this copy and are
# reconstructed as 'testing_rsa' -- confirm against the original script.
CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
    'testing_rsa')
CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
    'mod_for_test_scripts', 'ssh_keys', 'testing_rsa')

# Outcome codes for a single build attempt.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2
def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables.

  Args:
    depot_info: Dict of depot-name -> depot-config entries to merge into
        DEPOT_DEPS_NAME.
  """
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  # copy()/update() instead of dict(a.items() + b.items()): same result,
  # works on both Python 2 and 3, and avoids building throwaway lists.
  merged = DEPOT_DEPS_NAME.copy()
  merged.update(depot_info)
  DEPOT_DEPS_NAME = merged
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
def CalculateTruncatedMean(data_set, truncate_percent):
  """Calculates the truncated mean of a set of values.

  Args:
    data_set: Set of values to use in calculation.
    truncate_percent: The % from the upper/lower portions of the data set to
        discard, expressed as a value in [0, 1].

  Returns:
    The truncated mean as a float.
  """
  if len(data_set) > 2:
    data_set = sorted(data_set)

    discard_num_float = len(data_set) * truncate_percent
    discard_num_int = int(math.floor(discard_num_float))
    kept_weight = len(data_set) - discard_num_float * 2

    data_set = data_set[discard_num_int:len(data_set) - discard_num_int]

    weight_left = 1.0 - (discard_num_float - discard_num_int)

    if weight_left < 1:
      # If the % to discard leaves a fractional portion, weight the two
      # boundary values by the fraction that remains.
      unweighted_vals = data_set[1:len(data_set) - 1]
      weighted_vals = [data_set[0], data_set[len(data_set) - 1]]
      weighted_vals = [w * weight_left for w in weighted_vals]
      data_set = weighted_vals + unweighted_vals
  else:
    kept_weight = len(data_set)

  # sum() replaces the original reduce(lambda x, y: float(x) + float(y), ...)
  # -- same result, and no dependence on the Python-2-only builtin reduce.
  truncated_mean = sum(float(x) for x in data_set) / kept_weight

  return truncated_mean
def CalculateStandardDeviation(v):
  """Calculates the sample standard deviation of the given list of values.

  Args:
    v: Non-empty list of numeric values.

  Returns:
    The sample standard deviation as a float (0.0 for a single value).
  """
  # Guard against ZeroDivisionError from the (len(v) - 1) denominator.
  if len(v) == 1:
    return 0.0

  mean = CalculateTruncatedMean(v, 0.0)
  variances = [float(x) - mean for x in v]
  variances = [x * x for x in variances]
  # Sample variance (n - 1 denominator); sum() replaces py2-only reduce.
  variance = sum(variances) / (len(v) - 1)
  std_dev = math.sqrt(variance)

  return std_dev
def CalculatePooledStandardError(work_sets):
  """Calculates the pooled standard error over several sets of samples.

  Args:
    work_sets: A list of sample lists.

  Returns:
    The pooled standard error as a float (0.0 when there is nothing to pool).
  """
  numerator = 0.0
  denominator1 = 0.0
  denominator2 = 0.0

  for current_set in work_sets:
    std_dev = CalculateStandardDeviation(current_set)
    numerator += (len(current_set) - 1) * std_dev ** 2
    denominator1 += len(current_set) - 1
    denominator2 += 1.0 / len(current_set)

  # Guard: with no sets (or only single-sample sets) the pooled error is
  # undefined; return 0.0 instead of raising ZeroDivisionError.
  if denominator1:
    return math.sqrt(numerator / denominator1) * math.sqrt(denominator2)
  return 0.0
def CalculateStandardError(v):
  """Calculates the standard error of the mean for a list of values.

  Args:
    v: List of numeric values.

  Returns:
    The standard error as a float; 0.0 for zero or one value.
  """
  # A single sample has no meaningful spread.
  if len(v) <= 1:
    return 0.0

  std_dev = CalculateStandardDeviation(v)

  return std_dev / math.sqrt(len(v))
def IsStringFloat(string_to_check):
  """Checks whether or not the given string can be converted to a floating
  point number.

  Args:
    string_to_check: Input string to check if it can be converted to a float.

  Returns:
    True if the string can be converted to a float.
  """
  try:
    float(string_to_check)
    return True
  except ValueError:
    return False
def IsStringInt(string_to_check):
  """Checks whether or not the given string can be converted to a integer.

  Args:
    string_to_check: Input string to check if it can be converted to an int.

  Returns:
    True if the string can be converted to an int.
  """
  try:
    int(string_to_check)
    return True
  except ValueError:
    return False
274 """Checks whether or not the script is running on Windows.
277 True if running on Windows.
279 return sys
.platform
== 'cygwin' or sys
.platform
.startswith('win')
def Is64BitWindows():
  """Returns whether or not Windows is a 64-bit version.

  Returns:
    True if Windows is 64-bit, False if 32-bit.
  """
  # .get instead of direct indexing so the helper no longer raises KeyError
  # when PROCESSOR_ARCHITECTURE is unset (e.g. on non-Windows hosts).
  arch = os.environ.get('PROCESSOR_ARCHITECTURE', '')
  try:
    arch = os.environ['PROCESSOR_ARCHITEW6432']
  except KeyError:
    # Must not be running in WoW64, so PROCESSOR_ARCHITECTURE is correct
    pass

  return arch in ['AMD64', 'I64']
299 """Checks whether or not the script is running on Linux.
302 True if running on Linux.
304 return sys
.platform
.startswith('linux')
308 """Checks whether or not the script is running on Mac.
311 True if running on Mac.
313 return sys
.platform
.startswith('darwin')
def GetZipFileName(build_revision=None, target_arch='ia32'):
  """Gets the archive file name for the given revision.

  Args:
    build_revision: Optional revision appended to the base archive name.
    target_arch: Target architecture string ('ia32' or 'x64').

  Returns:
    'full-build-<platform>' or 'full-build-<platform>_<revision>.zip'.
  """
  def PlatformName():
    """Return a string to be used in paths for the platform."""
    # NOTE(review): the platform branches were reconstructed from fragments;
    # confirm the exact strings against the original script.
    if IsWindows():
      # Build archive for x64 is still stored with 'win32' suffix
      # (chromium_utils.PlatformName()).
      if Is64BitWindows() and target_arch == 'x64':
        return 'win32'
      return 'win32'
    if IsLinux():
      return 'linux'
    if IsMac():
      return 'mac'
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)

  base_name = 'full-build-%s' % PlatformName()
  if not build_revision:
    return base_name
  return '%s_%s.zip' % (base_name, build_revision)
def GetRemoteBuildPath(build_revision, target_arch='ia32'):
  """Compute the url to download the build from."""
  def GetGSRootFolderName():
    """Gets Google Cloud Storage root folder names"""
    # NOTE(review): the Windows/Mac builder-folder branches were reconstructed
    # from fragments; confirm the exact strings against the original script.
    if IsWindows():
      if Is64BitWindows() and target_arch == 'x64':
        return 'Win x64 Builder'
      return 'Win Builder'
    if IsLinux():
      return 'Linux Builder'
    if IsMac():
      return 'Mac Builder'
    raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

  base_filename = GetZipFileName(build_revision, target_arch)
  builder_folder = GetGSRootFolderName()
  return '%s/%s' % (builder_folder, base_filename)
357 def FetchFromCloudStorage(bucket_name
, source_path
, destination_path
):
358 """Fetches file(s) from the Google Cloud Storage.
361 bucket_name: Google Storage bucket name.
362 source_path: Source file path.
363 destination_path: Destination file path.
366 True if the fetching succeeds, otherwise False.
368 target_file
= os
.path
.join(destination_path
, os
.path
.basename(source_path
))
370 if cloud_storage
.Exists(bucket_name
, source_path
):
371 print 'Fetching file from gs//%s/%s ...' % (bucket_name
, source_path
)
372 cloud_storage
.Get(bucket_name
, source_path
, destination_path
)
373 if os
.path
.exists(target_file
):
376 print ('File gs://%s/%s not found in cloud storage.' % (
377 bucket_name
, source_path
))
379 print 'Something went wrong while fetching file from cloud: %s' % e
380 if os
.path
.exists(target_file
):
381 os
.remove(target_file
)
# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
  """Creates an entire path, if it doesn't already exist.

  Args:
    *path: Path components; joined with os.path.join.

  Returns:
    True if the directory now exists, False on any other OS error.
  """
  file_path = os.path.join(*path)
  try:
    os.makedirs(file_path)
  except OSError as e:
    # An already-existing directory is not a failure.
    if e.errno != errno.EEXIST:
      return False
  return True
397 # This is copied from Chromium's project build/scripts/common/chromium_utils.py.
398 def ExtractZip(filename
, output_dir
, verbose
=True):
399 """ Extract the zip archive in the output directory."""
400 MaybeMakeDirectory(output_dir
)
402 # On Linux and Mac, we use the unzip command as it will
403 # handle links and file bits (executable), which is much
404 # easier then trying to do that with ZipInfo options.
406 # On Windows, try to use 7z if it is installed, otherwise fall back to python
407 # zip module and pray we don't have files larger than 512MB to unzip.
409 if IsMac() or IsLinux():
410 unzip_cmd
= ['unzip', '-o']
411 elif IsWindows() and os
.path
.exists('C:\\Program Files\\7-Zip\\7z.exe'):
412 unzip_cmd
= ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']
415 # Make sure path is absolute before changing directories.
416 filepath
= os
.path
.abspath(filename
)
417 saved_dir
= os
.getcwd()
419 command
= unzip_cmd
+ [filepath
]
420 result
= RunProcess(command
)
423 raise IOError('unzip failed: %s => %s' % (str(command
), result
))
426 zf
= zipfile
.ZipFile(filename
)
427 for name
in zf
.namelist():
429 print 'Extracting %s' % name
430 zf
.extract(name
, output_dir
)
def RunProcess(command):
  """Run an arbitrary command. If output from the call is needed, use
  RunProcessAndRetrieveOutput instead.

  Args:
    command: A list containing the command and args to execute.

  Returns:
    The return code of the call.
  """
  # On Windows, use shell=True to get PATH interpretation.
  shell = IsWindows()
  return subprocess.call(command, shell=shell)
def RunProcessAndRetrieveOutput(command, cwd=None):
  """Run an arbitrary command, returning its output and return code. Since
  output is collected via communicate(), there will be no output until the
  call terminates. If you need output while the program runs (ie. so
  that the buildbot doesn't terminate the script), consider RunProcess().

  Args:
    command: A list containing the command and args to execute.

  Returns:
    A tuple of the output and return code.
  """
  # On Windows, use shell=True to get PATH interpretation.
  shell = IsWindows()
  proc = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE,
                          cwd=cwd)

  (output, _) = proc.communicate()

  return (output, proc.returncode)
def RunGit(command, cwd=None):
  """Runs a git subcommand.

  Args:
    command: A list containing the args to git.

  Returns:
    A tuple of the output and return code.
  """
  git_command = ['git'] + command
  return RunProcessAndRetrieveOutput(git_command, cwd=cwd)
def CheckRunGit(command, cwd=None):
  """Run a git subcommand, returning its output. Asserts if
  the return code of the call is non-zero.

  Args:
    command: A list containing the args to git.

  Returns:
    The output of the git command (docstring fixed: the original claimed a
    tuple, but only the output string is returned).
  """
  (output, return_code) = RunGit(command, cwd=cwd)

  assert not return_code, 'An error occurred while running'\
                          ' "git %s"' % ' '.join(command)
  return output
def SetBuildSystemDefault(build_system):
  """Sets up any environment variables needed to build with the specified build
  system.

  Args:
    build_system: A string specifying build system. Currently only 'ninja' or
      'make' are supported."""
  if build_system == 'ninja':
    gyp_var = os.getenv('GYP_GENERATORS')

    if not gyp_var or not 'ninja' in gyp_var:
      # NOTE(review): the inner if/else and the Windows GYP_DEFINES guard were
      # reconstructed from fragments -- confirm against the original script.
      if gyp_var:
        os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
      else:
        os.environ['GYP_GENERATORS'] = 'ninja'

      if IsWindows():
        os.environ['GYP_DEFINES'] = 'component=shared_library '\
            'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
            'chromium_win_pch=0'
  elif build_system == 'make':
    os.environ['GYP_GENERATORS'] = 'make'
  else:
    raise RuntimeError('%s build not supported.' % build_system)
def BuildWithMake(threads, targets):
  """Runs a Release make build of the given targets.

  Args:
    threads: Parallel job count for -j; falsy to let make decide.
    targets: List of make targets.

  Returns:
    True if the build succeeded (exit code 0).
  """
  cmd = ['make', 'BUILDTYPE=Release']

  if threads:
    cmd.append('-j%d' % threads)

  cmd += targets

  return_code = RunProcess(cmd)

  return not return_code
def BuildWithNinja(threads, targets):
  """Runs a ninja build of the given targets in out/Release.

  Args:
    threads: Parallel job count for -j; falsy to let ninja decide.
    targets: List of ninja targets.

  Returns:
    True if the build succeeded (exit code 0).
  """
  cmd = ['ninja', '-C', os.path.join('out', 'Release')]

  if threads:
    cmd.append('-j%d' % threads)

  cmd += targets

  return_code = RunProcess(cmd)

  return not return_code
def BuildWithVisualStudio(targets):
  """Runs a Release build of the given projects via devenv.com.

  Args:
    targets: List of project names to pass as /Project arguments.

  Returns:
    True if the build succeeded (exit code 0).
  """
  path_to_devenv = os.path.abspath(
      os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
  path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
  cmd = [path_to_devenv, '/build', 'Release', path_to_sln]

  for t in targets:
    cmd.extend(['/Project', t])

  return_code = RunProcess(cmd)

  return not return_code
class Builder(object):
  """Builder is used by the bisect script to build relevant targets and deploy.
  """
  def __init__(self, opts):
    """Performs setup for building with target build system.

    Args:
      opts: Options parsed from command line.
    """
    # NOTE(review): the Windows/non-Windows split was reconstructed from the
    # msvs/VS100COMNTOOLS fragments -- confirm against the original script.
    if IsWindows():
      if not opts.build_preference:
        opts.build_preference = 'msvs'

      if opts.build_preference == 'msvs':
        if not os.getenv('VS100COMNTOOLS'):
          raise RuntimeError(
              'Path to visual studio could not be determined.')
      else:
        SetBuildSystemDefault(opts.build_preference)
    else:
      if not opts.build_preference:
        # Default '' so an unset GYP_GENERATORS doesn't raise TypeError.
        if 'ninja' in os.getenv('GYP_GENERATORS', ''):
          opts.build_preference = 'ninja'
        else:
          opts.build_preference = 'make'

      SetBuildSystemDefault(opts.build_preference)

    if not bisect_utils.SetupPlatformBuildEnvironment(opts):
      raise RuntimeError('Failed to set platform environment.')

    bisect_utils.RunGClient(['runhooks'])

  @staticmethod
  def FromOpts(opts):
    """Constructs the builder subclass matching opts.target_platform."""
    builder = None
    if opts.target_platform == 'cros':
      builder = CrosBuilder(opts)
    elif opts.target_platform == 'android':
      builder = AndroidBuilder(opts)
    elif opts.target_platform == 'android-chrome':
      builder = AndroidChromeBuilder(opts)
    else:
      builder = DesktopBuilder(opts)
    return builder

  def Build(self, depot, opts):
    # Subclasses implement the actual build.
    raise NotImplementedError()

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    # Subclasses report where build artifacts land.
    raise NotImplementedError()
class DesktopBuilder(Builder):
  """DesktopBuilder is used to build Chromium on linux/mac/windows."""
  def __init__(self, opts):
    super(DesktopBuilder, self).__init__(opts)

  def Build(self, depot, opts):
    """Builds chromium_builder_perf target using options passed into
    the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    targets = ['chromium_builder_perf']

    # TODO(review): goma thread count reconstructed -- confirm value.
    threads = None
    if opts.use_goma:
      threads = 64

    build_success = False
    if opts.build_preference == 'make':
      build_success = BuildWithMake(threads, targets)
    elif opts.build_preference == 'ninja':
      build_success = BuildWithNinja(threads, targets)
    elif opts.build_preference == 'msvs':
      assert IsWindows(), 'msvs is only supported on Windows.'
      build_success = BuildWithVisualStudio(targets)
    else:
      assert False, 'No build system defined.'
    return build_success

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    """Returns the path to the build directory, relative to the checkout root.

    Assumes that the current working directory is the checkout root.
    """
    src_dir = src_dir or 'src'
    if opts.build_preference == 'ninja' or IsLinux():
      return os.path.join(src_dir, 'out')
    if IsMac():
      return os.path.join(src_dir, 'xcodebuild')
    if IsWindows():
      return os.path.join(src_dir, 'build')
    raise NotImplementedError('Unexpected platform %s' % sys.platform)
class AndroidBuilder(Builder):
  """AndroidBuilder is used to build on android."""
  def __init__(self, opts):
    super(AndroidBuilder, self).__init__(opts)

  def _GetTargets(self):
    # Targets required for bisecting on android.
    return ['chromium_testshell', 'cc_perftests_apk', 'android_tools']

  def Build(self, depot, opts):
    """Builds the android content shell and other necessary tools using options
    passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    # TODO(review): goma thread count reconstructed -- confirm value.
    threads = None
    if opts.use_goma:
      threads = 64

    build_success = False
    if opts.build_preference == 'ninja':
      build_success = BuildWithNinja(threads, self._GetTargets())
    else:
      assert False, 'No build system defined.'

    return build_success
class AndroidChromeBuilder(AndroidBuilder):
  """AndroidChromeBuilder builds "android-chrome" (Chrome for Android):
  everything AndroidBuilder builds, plus the chrome_apk target.
  (Docstring fixed: it previously named AndroidBuilder.)"""
  def __init__(self, opts):
    super(AndroidChromeBuilder, self).__init__(opts)

  def _GetTargets(self):
    # Extend the stock Android target list with the chrome apk itself.
    return AndroidBuilder._GetTargets(self) + ['chrome_apk']
709 class CrosBuilder(Builder
):
710 """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
712 def __init__(self
, opts
):
713 super(CrosBuilder
, self
).__init
__(opts
)
715 def ImageToTarget(self
, opts
):
716 """Installs latest image to target specified by opts.cros_remote_ip.
719 opts: Program options containing cros_board and cros_remote_ip.
725 # Keys will most likely be set to 0640 after wiping the chroot.
726 os
.chmod(CROS_SCRIPT_KEY_PATH
, 0600)
727 os
.chmod(CROS_TEST_KEY_PATH
, 0600)
728 cmd
= [CROS_SDK_PATH
, '--', './bin/cros_image_to_target.py',
729 '--remote=%s' % opts
.cros_remote_ip
,
730 '--board=%s' % opts
.cros_board
, '--test', '--verbose']
732 return_code
= RunProcess(cmd
)
733 return not return_code
737 def BuildPackages(self
, opts
, depot
):
738 """Builds packages for cros.
741 opts: Program options containing cros_board.
742 depot: The depot being bisected.
747 cmd
= [CROS_SDK_PATH
]
750 path_to_chrome
= os
.path
.join(os
.getcwd(), '..')
751 cmd
+= ['--chrome_root=%s' % path_to_chrome
]
756 cmd
+= ['CHROME_ORIGIN=LOCAL_SOURCE']
758 cmd
+= ['BUILDTYPE=Release', './build_packages',
759 '--board=%s' % opts
.cros_board
]
760 return_code
= RunProcess(cmd
)
762 return not return_code
764 def BuildImage(self
, opts
, depot
):
765 """Builds test image for cros.
768 opts: Program options containing cros_board.
769 depot: The depot being bisected.
774 cmd
= [CROS_SDK_PATH
]
777 path_to_chrome
= os
.path
.join(os
.getcwd(), '..')
778 cmd
+= ['--chrome_root=%s' % path_to_chrome
]
783 cmd
+= ['CHROME_ORIGIN=LOCAL_SOURCE']
785 cmd
+= ['BUILDTYPE=Release', '--', './build_image',
786 '--board=%s' % opts
.cros_board
, 'test']
788 return_code
= RunProcess(cmd
)
790 return not return_code
792 def Build(self
, depot
, opts
):
793 """Builds targets using options passed into the script.
796 depot: Current depot being bisected.
797 opts: The options parsed from the command line.
800 True if build was successful.
802 if self
.BuildPackages(opts
, depot
):
803 if self
.BuildImage(opts
, depot
):
804 return self
.ImageToTarget(opts
)
class SourceControl(object):
  """SourceControl is an abstraction over the underlying source control
  system used for chromium. For now only git is supported, but in the
  future, the svn workflow could be added as well."""
  def __init__(self):
    super(SourceControl, self).__init__()

  def SyncToRevisionWithGClient(self, revision):
    """Uses gclient to sync to the specified revision.

    ie. gclient sync --revision <revision>

    Args:
      revision: The git SHA1 or svn CL (depending on workflow).

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunGClient(['sync', '--revision',
        revision, '--verbose', '--nohooks', '--reset', '--force'])

  def SyncToRevisionWithRepo(self, timestamp):
    """Uses repo to sync all the underlying git depots to the specified
    time.

    Args:
      timestamp: The unix timestamp to sync to.

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunRepoSyncAtTimestamp(timestamp)
class GitSourceControl(SourceControl):
  """GitSourceControl is used to query the underlying source control. """
  def __init__(self, opts):
    super(GitSourceControl, self).__init__()
    # TODO(review): reconstructed -- confirm the original stored opts here.
    self.opts = opts

  def GetRevisionList(self, revision_range_end, revision_range_start, cwd=None):
    """Retrieves a list of revisions between |revision_range_start| and
    |revision_range_end|.

    Args:
      revision_range_end: The SHA1 for the end of the range.
      revision_range_start: The SHA1 for the beginning of the range.

    Returns:
      A list of the revisions between |revision_range_start| and
      |revision_range_end| (inclusive).
    """
    revision_range = '%s..%s' % (revision_range_start, revision_range_end)
    cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
    log_output = CheckRunGit(cmd, cwd=cwd)

    revision_hash_list = log_output.split()
    # git's a..b range excludes a; append it so the list is inclusive.
    revision_hash_list.append(revision_range_start)

    return revision_hash_list

  def SyncToRevision(self, revision, sync_client=None):
    """Syncs to the specified revision.

    Args:
      revision: The revision to sync to.
      sync_client: None to check out with git directly, or 'gclient'/'repo'
          to sync via that tool instead.

    Returns:
      True if successful.
    """
    if not sync_client:
      results = RunGit(['checkout', revision])[1]
    elif sync_client == 'gclient':
      results = self.SyncToRevisionWithGClient(revision)
    elif sync_client == 'repo':
      results = self.SyncToRevisionWithRepo(revision)
    # NOTE(review): an unrecognized sync_client leaves |results| unbound
    # (NameError), matching the reconstructed original.

    return not results

  def ResolveToRevision(self, revision_to_check, depot, search, cwd=None):
    """If an SVN revision is supplied, try to resolve it to a git SHA1.

    Args:
      revision_to_check: The user supplied revision string that may need to be
          resolved to a git SHA1.
      depot: The depot the revision_to_check is from.
      search: The number of changelists to try if the first fails to resolve
          to a git hash. If the value is negative, the function will search
          backwards chronologically, otherwise it will search forward.

    Returns:
      A string containing a git SHA1 hash, otherwise None.
    """
    # Android-chrome is git only, so no need to resolve this to anything else.
    if depot == 'android-chrome':
      return revision_to_check

    if depot != 'cros':
      # A non-integer is assumed to already be a git hash.
      if not IsStringInt(revision_to_check):
        return revision_to_check

      depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'

      if depot != 'chromium':
        depot_svn = DEPOT_DEPS_NAME[depot]['svn']

      svn_revision = int(revision_to_check)
      git_revision = None

      # Search forward or backward from the requested CL, looking for a
      # git-svn-id trailer that maps to a git hash.
      if search > 0:
        search_range = xrange(svn_revision, svn_revision + search, 1)
      else:
        search_range = xrange(svn_revision, svn_revision + search, -1)

      for i in search_range:
        svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
        # TODO(review): trailing ref ('origin/master') reconstructed.
        cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
               'origin/master']

        (log_output, return_code) = RunGit(cmd, cwd=cwd)

        assert not return_code, 'An error occurred while running'\
                                ' "git %s"' % ' '.join(cmd)

        if not return_code:
          log_output = log_output.strip()

          if log_output:
            git_revision = log_output
            break

      return git_revision

    # cros: an integer revision is usable directly.
    if IsStringInt(revision_to_check):
      return int(revision_to_check)

    # TODO(review): cwd save/restore around the chdir reconstructed.
    saved_cwd = os.getcwd()
    os.chdir(os.path.join(os.getcwd(), 'src', 'third_party',
        'chromiumos-overlay'))
    pattern = CROS_VERSION_PATTERN % revision_to_check
    cmd = ['log', '--format=%ct', '-1', '--grep', pattern]

    git_revision = None

    log_output = CheckRunGit(cmd, cwd=cwd)
    if log_output:
      git_revision = log_output
      git_revision = int(log_output.strip())

    os.chdir(saved_cwd)

    return git_revision

  def IsInProperBranch(self):
    """Confirms they're in the master branch for performing the bisection.
    This is needed or gclient will fail to sync properly.

    Returns:
      True if the current branch on src is 'master'.
    """
    cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
    log_output = CheckRunGit(cmd)
    log_output = log_output.strip()

    return log_output == "master"

  def SVNFindRev(self, revision):
    """Maps directly to the 'git svn find-rev' command.

    Args:
      revision: The git SHA1 to use.

    Returns:
      An integer changelist #, otherwise None.
    """
    cmd = ['svn', 'find-rev', revision]

    output = CheckRunGit(cmd)
    svn_revision = output.strip()

    if IsStringInt(svn_revision):
      return int(svn_revision)

    return None

  def QueryRevisionInfo(self, revision, cwd=None):
    """Gathers information on a particular revision, such as author's name,
    email, subject, and date.

    Args:
      revision: Revision you want to gather information on.

    Returns:
      A dict with 'author', 'email', 'subject', 'date' and 'body' keys.
    """
    commit_info = {}

    formats = ['%cN', '%cE', '%s', '%cD', '%b']
    targets = ['author', 'email', 'subject', 'date', 'body']

    # zip() instead of indexing by xrange(len(...)) -- same pairing, clearer.
    for fmt, target in zip(formats, targets):
      cmd = ['log', '--format=%s' % fmt, '-1', revision]
      output = CheckRunGit(cmd, cwd=cwd)
      commit_info[target] = output.rstrip()

    return commit_info

  def CheckoutFileAtRevision(self, file_name, revision, cwd=None):
    """Performs a checkout on a file at the given revision.

    Returns:
      True if successful.
    """
    return not RunGit(['checkout', revision, file_name], cwd=cwd)[1]

  def RevertFileToHead(self, file_name):
    """Unstages a file and returns it to HEAD.

    Returns:
      True if successful.
    """
    # NOTE(review): |file_name| is unused; the original operated on
    # bisect_utils.FILE_DEPS_GIT regardless of the argument.
    # Reset doesn't seem to return 0 on success.
    RunGit(['reset', 'HEAD', bisect_utils.FILE_DEPS_GIT])

    return not RunGit(['checkout', bisect_utils.FILE_DEPS_GIT])[1]

  def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
    """Returns a list of commits that modified this file.

    Args:
      filename: Name of file.
      revision_start: Start of revision range.
      revision_end: End of revision range.

    Returns:
      Returns a list of commits that touched this file.
    """
    # TODO(review): trailing |filename| argument reconstructed.
    cmd = ['log', '--format=%H', '%s~1..%s' % (revision_start, revision_end),
           filename]
    output = CheckRunGit(cmd)

    return [o for o in output.split('\n') if o]
1064 class BisectPerformanceMetrics(object):
1065 """BisectPerformanceMetrics performs a bisection against a list of range
1066 of revisions to narrow down where performance regressions may have
1069 def __init__(self
, source_control
, opts
):
1070 super(BisectPerformanceMetrics
, self
).__init
__()
1073 self
.source_control
= source_control
1074 self
.src_cwd
= os
.getcwd()
1075 self
.cros_cwd
= os
.path
.join(os
.getcwd(), '..', 'cros')
1077 self
.cleanup_commands
= []
1079 self
.builder
= Builder
.FromOpts(opts
)
1081 # This always starts true since the script grabs latest first.
1082 self
.was_blink
= True
1084 for d
in DEPOT_NAMES
:
1085 # The working directory of each depot is just the path to the depot, but
1086 # since we're already in 'src', we can skip that part.
1088 self
.depot_cwd
[d
] = os
.path
.join(
1089 self
.src_cwd
, DEPOT_DEPS_NAME
[d
]['src'][4:])
1091 def PerformCleanup(self
):
1092 """Performs cleanup when script is finished."""
1093 os
.chdir(self
.src_cwd
)
1094 for c
in self
.cleanup_commands
:
1096 shutil
.move(c
[1], c
[2])
1098 assert False, 'Invalid cleanup command.'
1100 def GetRevisionList(self
, depot
, bad_revision
, good_revision
):
1101 """Retrieves a list of all the commits between the bad revision and
1102 last known good revision."""
1104 revision_work_list
= []
1107 revision_range_start
= good_revision
1108 revision_range_end
= bad_revision
1111 self
.ChangeToDepotWorkingDirectory('cros')
1113 # Print the commit timestamps for every commit in the revision time
1114 # range. We'll sort them and bisect by that. There is a remote chance that
1115 # 2 (or more) commits will share the exact same timestamp, but it's
1116 # probably safe to ignore that case.
1117 cmd
= ['repo', 'forall', '-c',
1118 'git log --format=%%ct --before=%d --after=%d' % (
1119 revision_range_end
, revision_range_start
)]
1120 (output
, return_code
) = RunProcessAndRetrieveOutput(cmd
)
1122 assert not return_code
, 'An error occurred while running'\
1123 ' "%s"' % ' '.join(cmd
)
1127 revision_work_list
= list(set(
1128 [int(o
) for o
in output
.split('\n') if IsStringInt(o
)]))
1129 revision_work_list
= sorted(revision_work_list
, reverse
=True)
1131 cwd
= self
._GetDepotDirectory
(depot
)
1132 revision_work_list
= self
.source_control
.GetRevisionList(bad_revision
,
1133 good_revision
, cwd
=cwd
)
1135 return revision_work_list
1137 def _GetV8BleedingEdgeFromV8TrunkIfMappable(self
, revision
):
1138 svn_revision
= self
.source_control
.SVNFindRev(revision
)
1140 if IsStringInt(svn_revision
):
1141 # V8 is tricky to bisect, in that there are only a few instances when
1142 # we can dive into bleeding_edge and get back a meaningful result.
1143 # Try to detect a V8 "business as usual" case, which is when:
1144 # 1. trunk revision N has description "Version X.Y.Z"
1145 # 2. bleeding_edge revision (N-1) has description "Prepare push to
1146 # trunk. Now working on X.Y.(Z+1)."
1148 # As of 01/24/2014, V8 trunk descriptions are formatted:
1149 # "Version 3.X.Y (based on bleeding_edge revision rZ)"
1150 # So we can just try parsing that out first and fall back to the old way.
1151 v8_dir
= self
._GetDepotDirectory
('v8')
1152 v8_bleeding_edge_dir
= self
._GetDepotDirectory
('v8_bleeding_edge')
1154 revision_info
= self
.source_control
.QueryRevisionInfo(revision
,
1157 version_re
= re
.compile("Version (?P<values>[0-9,.]+)")
1159 regex_results
= version_re
.search(revision_info
['subject'])
1164 # Look for "based on bleeding_edge" and parse out revision
1165 if 'based on bleeding_edge' in revision_info
['subject']:
1167 bleeding_edge_revision
= revision_info
['subject'].split(
1168 'bleeding_edge revision r')[1]
1169 bleeding_edge_revision
= int(bleeding_edge_revision
.split(')')[0])
1170 git_revision
= self
.source_control
.ResolveToRevision(
1171 bleeding_edge_revision
, 'v8_bleeding_edge', 1,
1172 cwd
=v8_bleeding_edge_dir
)
1173 except IndexError, ValueError:
1176 if not git_revision
:
1177 # Wasn't successful, try the old way of looking for "Prepare push to"
1178 git_revision
= self
.source_control
.ResolveToRevision(
1179 int(svn_revision
) - 1, 'v8_bleeding_edge', -1,
1180 cwd
=v8_bleeding_edge_dir
)
1183 revision_info
= self
.source_control
.QueryRevisionInfo(git_revision
,
1184 cwd
=v8_bleeding_edge_dir
)
1186 if 'Prepare push to trunk' in revision_info
['subject']:
1190 def _GetNearestV8BleedingEdgeFromTrunk(self
, revision
, search_forward
=True):
1191 cwd
= self
._GetDepotDirectory
('v8')
1192 cmd
= ['log', '--format=%ct', '-1', revision
]
1193 output
= CheckRunGit(cmd
, cwd
=cwd
)
1194 commit_time
= int(output
)
1198 cmd
= ['log', '--format=%H', '-10', '--after=%d' % commit_time
,
1200 output
= CheckRunGit(cmd
, cwd
=cwd
)
1201 output
= output
.split()
1203 commits
= reversed(commits
)
1205 cmd
= ['log', '--format=%H', '-10', '--before=%d' % commit_time
,
1207 output
= CheckRunGit(cmd
, cwd
=cwd
)
1208 output
= output
.split()
1211 bleeding_edge_revision
= None
1214 bleeding_edge_revision
= self
._GetV
8BleedingEdgeFromV
8TrunkIfMappable
(c
)
1215 if bleeding_edge_revision
:
1218 return bleeding_edge_revision
1220 def Get3rdPartyRevisionsFromCurrentRevision(self
, depot
, revision
):
1221 """Parses the DEPS file to determine WebKit/v8/etc... versions.
1224 A dict in the format {depot:revision} if successful, otherwise None.
1228 self
.ChangeToDepotWorkingDirectory(depot
)
1232 if depot
== 'chromium' or depot
== 'android-chrome':
1233 locals = {'Var': lambda _
: locals["vars"][_
],
1234 'From': lambda *args
: None}
1235 execfile(bisect_utils
.FILE_DEPS_GIT
, {}, locals)
1239 rxp
= re
.compile(".git@(?P<revision>[a-fA-F0-9]+)")
1241 for d
in DEPOT_NAMES
:
1242 if DEPOT_DEPS_NAME
[d
].has_key('platform'):
1243 if DEPOT_DEPS_NAME
[d
]['platform'] != os
.name
:
1246 if (DEPOT_DEPS_NAME
[d
]['recurse'] and
1247 depot
in DEPOT_DEPS_NAME
[d
]['from']):
1248 if (locals['deps'].has_key(DEPOT_DEPS_NAME
[d
]['src']) or
1249 locals['deps'].has_key(DEPOT_DEPS_NAME
[d
]['src_old'])):
1250 if locals['deps'].has_key(DEPOT_DEPS_NAME
[d
]['src']):
1251 re_results
= rxp
.search(locals['deps'][DEPOT_DEPS_NAME
[d
]['src']])
1252 self
.depot_cwd
[d
] =\
1253 os
.path
.join(self
.src_cwd
, DEPOT_DEPS_NAME
[d
]['src'][4:])
1254 elif locals['deps'].has_key(DEPOT_DEPS_NAME
[d
]['src_old']):
1256 rxp
.search(locals['deps'][DEPOT_DEPS_NAME
[d
]['src_old']])
1257 self
.depot_cwd
[d
] =\
1258 os
.path
.join(self
.src_cwd
, DEPOT_DEPS_NAME
[d
]['src_old'][4:])
1261 results
[d
] = re_results
.group('revision')
1263 print 'Couldn\'t parse revision for %s.' % d
1267 print 'Couldn\'t find %s while parsing .DEPS.git.' % d
1270 elif depot
== 'cros':
1271 cmd
= [CROS_SDK_PATH
, '--', 'portageq-%s' % self
.opts
.cros_board
,
1272 'best_visible', '/build/%s' % self
.opts
.cros_board
, 'ebuild',
1273 CROS_CHROMEOS_PATTERN
]
1274 (output
, return_code
) = RunProcessAndRetrieveOutput(cmd
)
1276 assert not return_code
, 'An error occurred while running'\
1277 ' "%s"' % ' '.join(cmd
)
1279 if len(output
) > CROS_CHROMEOS_PATTERN
:
1280 output
= output
[len(CROS_CHROMEOS_PATTERN
):]
1283 output
= output
.split('_')[0]
1286 contents
= output
.split('.')
1288 version
= contents
[2]
1290 if contents
[3] != '0':
1291 warningText
= 'Chrome version: %s.%s but using %s.0 to bisect.' %\
1292 (version
, contents
[3], version
)
1293 if not warningText
in self
.warnings
:
1294 self
.warnings
.append(warningText
)
1297 self
.ChangeToDepotWorkingDirectory('chromium')
1298 return_code
= CheckRunGit(['log', '-1', '--format=%H',
1299 '--author=chrome-release@google.com', '--grep=to %s' % version
,
1303 results
['chromium'] = output
.strip()
1305 # We can't try to map the trunk revision to bleeding edge yet, because
1306 # we don't know which direction to try to search in. Have to wait until
1307 # the bisect has narrowed the results down to 2 v8 rolls.
1308 results
['v8_bleeding_edge'] = None
def BackupOrRestoreOutputdirectory(self, restore=False, build_type='Release'):
  """Backs up or restores build output directory based on restore argument.

  Args:
    restore: Indicates whether to restore or backup. Default is False(Backup)
    build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)

  Returns:
    Path to backup or restored location as string. otherwise None if it fails.
  """
  build_dir = os.path.abspath(
      self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
  source_dir = os.path.join(build_dir, build_type)
  destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
  if restore:
    # When restoring, move from the .bak directory back into the regular
    # output directory (the swap inverts the copy direction).
    source_dir, destination_dir = destination_dir, source_dir
  if os.path.exists(source_dir):
    RmTreeAndMkDir(destination_dir, skip_makedir=True)
    shutil.move(source_dir, destination_dir)
    return destination_dir
1334 def DownloadCurrentBuild(self
, sha_revision
, build_type
='Release'):
1335 """Download the build archive for the given revision.
1338 sha_revision: The git SHA1 for the revision.
1339 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1342 True if download succeeds, otherwise False.
1344 # Get SVN revision for the given SHA, since builds are archived using SVN
1346 revision
= self
.source_control
.SVNFindRev(sha_revision
)
1349 'Failed to determine SVN revision for %s' % sha_revision
)
1351 abs_build_dir
= os
.path
.abspath(
1352 self
.builder
.GetBuildOutputDirectory(self
.opts
, self
.src_cwd
))
1353 target_build_output_dir
= os
.path
.join(abs_build_dir
, build_type
)
1354 # Get build target architecture.
1355 build_arch
= self
.opts
.target_arch
1356 # File path of the downloaded archive file.
1357 archive_file_dest
= os
.path
.join(abs_build_dir
,
1358 GetZipFileName(revision
, build_arch
))
1359 if FetchFromCloudStorage(self
.opts
.gs_bucket
,
1360 GetRemoteBuildPath(revision
, build_arch
),
1362 # Generic name for the archive, created when archive file is extracted.
1363 output_dir
= os
.path
.join(abs_build_dir
,
1364 GetZipFileName(target_arch
=build_arch
))
1365 # Unzip build archive directory.
1367 RmTreeAndMkDir(output_dir
, skip_makedir
=True)
1368 ExtractZip(archive_file_dest
, abs_build_dir
)
1369 if os
.path
.exists(output_dir
):
1370 self
.BackupOrRestoreOutputdirectory(restore
=False)
1371 print 'Moving build from %s to %s' % (
1372 output_dir
, target_build_output_dir
)
1373 shutil
.move(output_dir
, target_build_output_dir
)
1375 raise IOError('Missing extracted folder %s ' % output_dir
)
1377 print 'Somewthing went wrong while extracting archive file: %s' % e
1378 self
.BackupOrRestoreOutputdirectory(restore
=True)
1379 # Cleanup any leftovers from unzipping.
1380 if os
.path
.exists(output_dir
):
1381 RmTreeAndMkDir(output_dir
, skip_makedir
=True)
1383 # Delete downloaded archive
1384 if os
.path
.exists(archive_file_dest
):
1385 os
.remove(archive_file_dest
)
def BuildCurrentRevision(self, depot, revision=None):
  """Builds chrome and performance_ui_tests on the current revision.

  Args:
    depot: Current depot being bisected.
    revision: Revision being built; used to fetch a prebuilt archive when
        a cloud storage bucket was supplied via --gs_bucket.

  Returns:
    True if the build was successful.
  """
  if self.opts.debug_ignore_build:
    # Debug flag: skip the build entirely and report success.
    return True

  os.chdir(self.src_cwd)
  # Fetch build archive for the given revision from the cloud storage when
  # the storage bucket is passed.
  if depot == 'chromium' and self.opts.gs_bucket and revision:
    if self.DownloadCurrentBuild(revision):
      # The prebuilt archive replaced the local output directory; no
      # local build is required.
      return True
    raise RuntimeError('Failed to download build archive for revision %s.\n'
                       'Unfortunately, bisection couldn\'t continue any '
                       'further. Please try running script without '
                       '--gs_bucket flag to produce local builds.' % revision)

  build_success = self.builder.Build(depot, self.opts)

  return build_success
def RunGClientHooks(self):
  """Runs gclient with runhooks command.

  Returns:
    True if gclient reports no errors.
  """
  if self.opts.debug_ignore_build:
    # Debug flag: skip running hooks and report success.
    return True

  return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
def TryParseHistogramValuesFromOutput(self, metric, text):
  """Attempts to parse a metric in the format HISTOGRAM <graph>: <trace>.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])

  text_lines = text.split('\n')
  values_list = []

  for current_line in text_lines:
    if metric_formatted in current_line:
      current_line = current_line[len(metric_formatted):]

      try:
        # TODO(security): eval of test output can execute arbitrary
        # expressions; the data is expected to be a literal dict of
        # histogram buckets.
        histogram_values = eval(current_line)

        for b in histogram_values['buckets']:
          average_for_bucket = float(b['high'] + b['low']) * 0.5
          # Extends the list with N-elements with the average for that bucket.
          values_list.extend([average_for_bucket] * b['count'])
      except Exception:
        # Malformed histogram data on this line; skip it and keep
        # scanning the remaining lines.
        continue

  return values_list
def TryParseResultValuesFromOutput(self, metric, text):
  """Attempts to parse a metric in the format RESULT <graph>: <trace>.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  # Format is: RESULT <graph>: <trace>= <value> <units>
  metric_formatted = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))

  text_lines = text.split('\n')
  values_list = []

  for current_line in text_lines:
    # Parse the output from the performance test for the metric we're
    # interested in. Try the single-value form first.
    metric_re = metric_formatted +\
                "(\s)*(?P<values>[0-9]+(\.[0-9]*)?)"
    metric_re = re.compile(metric_re)
    regex_results = metric_re.search(current_line)

    if not regex_results is None:
      values_list += [regex_results.group('values')]
    else:
      # Fall back to the list form: RESULT <graph>: <trace>= [v1,v2,...]
      metric_re = metric_formatted +\
                  "(\s)*\[(\s)*(?P<values>[0-9,.]+)\]"
      metric_re = re.compile(metric_re)
      regex_results = metric_re.search(current_line)

      if not regex_results is None:
        metric_values = regex_results.group('values')

        values_list += metric_values.split(',')

  values_list = [float(v) for v in values_list if IsStringFloat(v)]

  # If the metric is times/t, we need to sum the timings in order to get
  # similar regression results as the try-bots.
  metrics_to_sum = [['times', 't'], ['times', 'page_load_time'],
      ['cold_times', 'page_load_time'], ['warm_times', 'page_load_time']]

  if metric in metrics_to_sum:
    # Guard against reduce on an empty list, which would raise.
    if values_list:
      values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]

  return values_list
def ParseMetricValuesFromOutput(self, metric, text):
  """Extracts the values of |metric| from performance test output.

  Tries the RESULT-line format first, then falls back to the HISTOGRAM
  format when no values were found.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  parsed_values = self.TryParseResultValuesFromOutput(metric, text)
  if parsed_values:
    return parsed_values
  return self.TryParseHistogramValuesFromOutput(metric, text)
1524 def _GenerateProfileIfNecessary(self
, command_args
):
1525 """Checks the command line of the performance test for dependencies on
1526 profile generation, and runs tools/perf/generate_profile as necessary.
1529 command_args: Command line being passed to performance test, as a list.
1532 False if profile generation was necessary and failed, otherwise True.
1535 if '--profile-dir' in ' '.join(command_args
):
1536 # If we were using python 2.7+, we could just use the argparse
1537 # module's parse_known_args to grab --profile-dir. Since some of the
1538 # bots still run 2.6, have to grab the arguments manually.
1540 args_to_parse
= ['--profile-dir', '--browser']
1542 for arg_to_parse
in args_to_parse
:
1543 for i
, current_arg
in enumerate(command_args
):
1544 if arg_to_parse
in current_arg
:
1545 current_arg_split
= current_arg
.split('=')
1547 # Check 2 cases, --arg=<val> and --arg <val>
1548 if len(current_arg_split
) == 2:
1549 arg_dict
[arg_to_parse
] = current_arg_split
[1]
1550 elif i
+ 1 < len(command_args
):
1551 arg_dict
[arg_to_parse
] = command_args
[i
+1]
1553 path_to_generate
= os
.path
.join('tools', 'perf', 'generate_profile')
1555 if arg_dict
.has_key('--profile-dir') and arg_dict
.has_key('--browser'):
1556 profile_path
, profile_type
= os
.path
.split(arg_dict
['--profile-dir'])
1557 return not RunProcess(['python', path_to_generate
,
1558 '--profile-type-to-generate', profile_type
,
1559 '--browser', arg_dict
['--browser'], '--output-dir', profile_path
])
1563 def RunPerformanceTestAndParseResults(self
, command_to_run
, metric
,
1564 reset_on_first_run
=False, upload_on_last_run
=False, results_label
=None):
1565 """Runs a performance test on the current revision by executing the
1566 'command_to_run' and parses the results.
1569 command_to_run: The command to be run to execute the performance test.
1570 metric: The metric to parse out from the results of the performance test.
1573 On success, it will return a tuple of the average value of the metric,
1574 and a success code of 0.
1577 if self
.opts
.debug_ignore_perf_test
:
1578 return ({'mean': 0.0, 'std_err': 0.0, 'std_dev': 0.0, 'values': [0.0]}, 0)
1581 command_to_run
= command_to_run
.replace('/', r
'\\')
1583 args
= shlex
.split(command_to_run
)
1585 if not self
._GenerateProfileIfNecessary
(args
):
1586 return ('Failed to generate profile for performance test.', -1)
1588 # If running a telemetry test for cros, insert the remote ip, and
1589 # identity parameters.
1590 is_telemetry
= bisect_utils
.IsTelemetryCommand(command_to_run
)
1591 if self
.opts
.target_platform
== 'cros' and is_telemetry
:
1592 args
.append('--remote=%s' % self
.opts
.cros_remote_ip
)
1593 args
.append('--identity=%s' % CROS_TEST_KEY_PATH
)
1596 os
.chdir(self
.src_cwd
)
1598 start_time
= time
.time()
1601 output_of_all_runs
= ''
1602 for i
in xrange(self
.opts
.repeat_test_count
):
1603 # Can ignore the return code since if the tests fail, it won't return 0.
1605 current_args
= copy
.copy(args
)
1607 if i
== 0 and reset_on_first_run
:
1608 current_args
.append('--reset-results')
1609 elif i
== self
.opts
.repeat_test_count
- 1 and upload_on_last_run
:
1610 current_args
.append('--upload-results')
1612 current_args
.append('--results-label=%s' % results_label
)
1613 (output
, return_code
) = RunProcessAndRetrieveOutput(current_args
)
1615 if e
.errno
== errno
.ENOENT
:
1616 err_text
= ("Something went wrong running the performance test. "
1617 "Please review the command line:\n\n")
1618 if 'src/' in ' '.join(args
):
1619 err_text
+= ("Check that you haven't accidentally specified a path "
1620 "with src/ in the command.\n\n")
1621 err_text
+= ' '.join(args
)
1624 return (err_text
, -1)
1627 output_of_all_runs
+= output
1628 if self
.opts
.output_buildbot_annotations
:
1631 metric_values
+= self
.ParseMetricValuesFromOutput(metric
, output
)
1633 elapsed_minutes
= (time
.time() - start_time
) / 60.0
1635 if elapsed_minutes
>= self
.opts
.max_time_minutes
or not metric_values
:
1640 # Need to get the average value if there were multiple values.
1642 truncated_mean
= CalculateTruncatedMean(metric_values
,
1643 self
.opts
.truncate_percent
)
1644 standard_err
= CalculateStandardError(metric_values
)
1645 standard_dev
= CalculateStandardDeviation(metric_values
)
1648 'mean': truncated_mean
,
1649 'std_err': standard_err
,
1650 'std_dev': standard_dev
,
1651 'values': metric_values
,
1654 print 'Results of performance test: %12f %12f' % (
1655 truncated_mean
, standard_err
)
1657 return (values
, 0, output_of_all_runs
)
1659 return ('Invalid metric specified, or no values returned from '
1660 'performance test.', -1, output_of_all_runs
)
def FindAllRevisionsToSync(self, revision, depot):
  """Finds all dependant revisions and depots that need to be synced for a
  given revision. This is only useful in the git workflow, as an svn depot
  may be split into multiple mirrors.

  ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
  skia/include. To sync skia/src properly, one has to find the proper
  revisions in skia/gyp and skia/include.

  Args:
    revision: The revision to sync to.
    depot: The depot in use at the moment (probably skia).

  Returns:
    A list of [depot, revision] pairs that need to be synced, or None if a
    dependant revision could not be resolved.
  """
  revisions_to_sync = [[depot, revision]]

  is_base = ((depot == 'chromium') or (depot == 'cros') or
      (depot == 'android-chrome'))

  # Some SVN depots were split into multiple git depots, so we need to
  # figure out for each mirror which git revision to grab. There's no
  # guarantee that the SVN revision will exist for each of the dependant
  # depots, so we have to grep the git logs and grab the next earlier one.
  if not is_base and\
     DEPOT_DEPS_NAME[depot]['depends'] and\
     self.source_control.IsGit():
    svn_rev = self.source_control.SVNFindRev(revision)

    for d in DEPOT_DEPS_NAME[depot]['depends']:
      self.ChangeToDepotWorkingDirectory(d)

      dependant_rev = self.source_control.ResolveToRevision(svn_rev, d, -1000)

      if dependant_rev:
        revisions_to_sync.append([d, dependant_rev])

    num_resolved = len(revisions_to_sync)
    num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])

    self.ChangeToDepotWorkingDirectory(depot)

    if not ((num_resolved - 1) == num_needed):
      # At least one dependant depot's revision could not be resolved.
      return None

  return revisions_to_sync
def PerformPreBuildCleanup(self):
  """Performs necessary cleanup between runs.

  Deletes stale .pyc files under the source tree so the next run does not
  pick up bytecode left over from a previously synced revision.
  """
  print 'Cleaning up between runs.'
  # Having these pyc files around between runs can confuse the
  # perf tests and cause them to crash.
  # NOTE(review): 'dir' shadows the builtin; it is unused here.
  for (path, dir, files) in os.walk(self.src_cwd):
    for cur_file in files:
      if cur_file.endswith('.pyc'):
        path_to_file = os.path.join(path, cur_file)
        os.remove(path_to_file)
def PerformWebkitDirectoryCleanup(self, revision):
  """If the script is switching between Blink and WebKit during bisect,
  its faster to just delete the directory rather than leave it up to git
  to sync the correct state.

  Args:
    revision: The revision whose .DEPS.git is inspected to decide whether
        the checkout is Blink or WebKit.

  Returns:
    True if successful.
  """
  if not self.source_control.CheckoutFileAtRevision(
      bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
    return False

  os.chdir(self.src_cwd)

  is_blink = bisect_utils.IsDepsFileBlink()

  if not self.source_control.RevertFileToHead(
      bisect_utils.FILE_DEPS_GIT):
    return False

  if self.was_blink != is_blink:
    # Blink<->WebKit switch detected: delete the directory outright.
    self.was_blink = is_blink
    return bisect_utils.RemoveThirdPartyWebkitDirectory()
  return True
def PerformCrosChrootCleanup(self):
  """Deletes the cros chroot.

  Returns:
    True if successful.
  """
  self.ChangeToDepotWorkingDirectory('cros')
  delete_cmd = [CROS_SDK_PATH, '--delete']
  exit_code = RunProcess(delete_cmd)

  return not exit_code
def CreateCrosChroot(self):
  """Creates a new cros chroot.

  Returns:
    True if successful.
  """
  self.ChangeToDepotWorkingDirectory('cros')
  create_cmd = [CROS_SDK_PATH, '--create']
  exit_code = RunProcess(create_cmd)

  return not exit_code
def PerformPreSyncCleanup(self, revision, depot):
  """Performs any necessary cleanup before syncing.

  Args:
    revision: The revision being synced to.
    depot: The depot being bisected.

  Returns:
    True if successful.
  """
  if depot == 'chromium':
    if not bisect_utils.RemoveThirdPartyLibjingleDirectory():
      return False
    return self.PerformWebkitDirectoryCleanup(revision)
  elif depot == 'cros':
    return self.PerformCrosChrootCleanup()
  return True
def RunPostSync(self, depot):
  """Performs any work after syncing.

  Args:
    depot: The depot that was just synced.

  Returns:
    True if successful.
  """
  if self.opts.target_platform == 'android':
    if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
        path_to_src=self.src_cwd):
      return False

  # NOTE(review): the depot split below was reconstructed — the visible
  # source returns CreateCrosChroot() in one branch and RunGClientHooks()
  # in the other; confirm the condition.
  if depot == 'cros':
    return self.CreateCrosChroot()
  return self.RunGClientHooks()
def ShouldSkipRevision(self, depot, revision):
  """Some commits can be safely skipped (such as a DEPS roll), since the tool
  is git based those changes would have no effect.

  Args:
    depot: The depot being bisected.
    revision: Current revision we're synced to.

  Returns:
    True if we should skip building/testing this revision.
  """
  if depot == 'chromium':
    if self.source_control.IsGit():
      cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
      output = CheckRunGit(cmd)

      files = output.splitlines()

      # A commit that touches only DEPS is a roll; in a git checkout the
      # dependency state is managed separately, so it can be skipped.
      if len(files) == 1 and files[0] == 'DEPS':
        return True

  return False
1831 def SyncBuildAndRunRevision(self
, revision
, depot
, command_to_run
, metric
,
1833 """Performs a full sync/build/run of the specified revision.
1836 revision: The revision to sync to.
1837 depot: The depot that's being used at the moment (src, webkit, etc.)
1838 command_to_run: The command to execute the performance test.
1839 metric: The performance metric being tested.
1842 On success, a tuple containing the results of the performance test.
1843 Otherwise, a tuple with the error message.
1846 if depot
== 'chromium' or depot
== 'android-chrome':
1847 sync_client
= 'gclient'
1848 elif depot
== 'cros':
1849 sync_client
= 'repo'
1851 revisions_to_sync
= self
.FindAllRevisionsToSync(revision
, depot
)
1853 if not revisions_to_sync
:
1854 return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL
)
1856 if not self
.PerformPreSyncCleanup(revision
, depot
):
1857 return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL
)
1861 if not self
.opts
.debug_ignore_sync
:
1862 for r
in revisions_to_sync
:
1863 self
.ChangeToDepotWorkingDirectory(r
[0])
1866 self
.PerformPreBuildCleanup()
1868 # If you're using gclient to sync, you need to specify the depot you
1869 # want so that all the dependencies sync properly as well.
1870 # ie. gclient sync src@<SHA1>
1871 current_revision
= r
[1]
1872 if sync_client
== 'gclient':
1873 current_revision
= '%s@%s' % (DEPOT_DEPS_NAME
[depot
]['src'],
1875 if not self
.source_control
.SyncToRevision(current_revision
,
1882 success
= self
.RunPostSync(depot
)
1884 if skippable
and self
.ShouldSkipRevision(depot
, revision
):
1885 return ('Skipped revision: [%s]' % str(revision
),
1886 BUILD_RESULT_SKIPPED
)
1888 start_build_time
= time
.time()
1889 if self
.BuildCurrentRevision(depot
, revision
):
1890 after_build_time
= time
.time()
1891 results
= self
.RunPerformanceTestAndParseResults(command_to_run
,
1893 # Restore build output directory once the tests are done, to avoid
1895 if depot
== 'chromium' and self
.opts
.gs_bucket
and revision
:
1896 self
.BackupOrRestoreOutputdirectory(restore
=True)
1899 external_revisions
= self
.Get3rdPartyRevisionsFromCurrentRevision(
1902 if not external_revisions
is None:
1903 return (results
[0], results
[1], external_revisions
,
1904 time
.time() - after_build_time
, after_build_time
-
1907 return ('Failed to parse DEPS file for external revisions.',
1912 return ('Failed to build revision: [%s]' % (str(revision
, )),
1915 return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL
)
1917 return ('Failed to sync revision: [%s]' % (str(revision
, )),
def CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
  """Decides whether a run passed by comparing its mean metric value
  against the known good and bad reference means.

  Args:
    current_value: The value of the metric being checked.
    known_bad_value: The reference value for a "failed" run.
    known_good_value: The reference value for a "passed" run.

  Returns:
    True if the current_value is closer to the known_good_value than the
    known_bad_value.
  """
  current_mean = current_value['mean']
  good_distance = abs(current_mean - known_good_value['mean'])
  bad_distance = abs(current_mean - known_bad_value['mean'])

  return good_distance < bad_distance
def _GetDepotDirectory(self, depot_name):
  """Maps a depot name to its local working directory.

  Args:
    depot_name: The name of the depot (see DEPOT_NAMES).

  Returns:
    The path of the depot's working directory.
  """
  if depot_name == 'chromium':
    return self.src_cwd
  elif depot_name == 'cros':
    return self.cros_cwd
  elif depot_name in DEPOT_NAMES:
    return self.depot_cwd[depot_name]
  else:
    assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
                  ' was added without proper support?' %\
                  (depot_name,)
def ChangeToDepotWorkingDirectory(self, depot_name):
  """Given a depot, changes to the appropriate working directory.

  Args:
    depot_name: The name of the depot (see DEPOT_NAMES).
  """
  target_directory = self._GetDepotDirectory(depot_name)
  os.chdir(target_directory)
1958 def _FillInV8BleedingEdgeInfo(self
, min_revision_data
, max_revision_data
):
1959 r1
= self
._GetNearestV
8BleedingEdgeFromTrunk
(min_revision_data
['revision'],
1960 search_forward
=True)
1961 r2
= self
._GetNearestV
8BleedingEdgeFromTrunk
(max_revision_data
['revision'],
1962 search_forward
=False)
1963 min_revision_data
['external']['v8_bleeding_edge'] = r1
1964 max_revision_data
['external']['v8_bleeding_edge'] = r2
1966 if (not self
._GetV
8BleedingEdgeFromV
8TrunkIfMappable
(
1967 min_revision_data
['revision']) or
1968 not self
._GetV
8BleedingEdgeFromV
8TrunkIfMappable
(
1969 max_revision_data
['revision'])):
1970 self
.warnings
.append('Trunk revisions in V8 did not map directly to '
1971 'bleeding_edge. Attempted to expand the range to find V8 rolls which '
1972 'did map directly to bleeding_edge revisions, but results might not '
def _FindNextDepotToBisect(self, current_depot, current_revision,
    min_revision_data, max_revision_data):
  """Given the state of the bisect, decides which depot the script should
  dive into next (if any).

  Args:
    current_depot: Current depot being bisected.
    current_revision: Current revision synced to.
    min_revision_data: Data about the earliest revision in the bisect range.
    max_revision_data: Data about the latest revision in the bisect range.

  Returns:
    The depot to bisect next, or None.
  """
  external_depot = None
  for next_depot in DEPOT_NAMES:
    if DEPOT_DEPS_NAME[next_depot].has_key('platform'):
      if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
        # Depot is specific to a different platform.
        continue

    if not (DEPOT_DEPS_NAME[next_depot]["recurse"] and
        min_revision_data['depot'] in DEPOT_DEPS_NAME[next_depot]['from']):
      continue

    if current_depot == 'v8':
      # We grab the bleeding_edge info here rather than earlier because we
      # finally have the revision range. From that we can search forwards and
      # backwards to try to match trunk revisions to bleeding_edge.
      self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)

    if (min_revision_data['external'][next_depot] ==
        max_revision_data['external'][next_depot]):
      # The external revision did not change across the range; nothing
      # to bisect in this depot.
      continue

    if (min_revision_data['external'][next_depot] and
        max_revision_data['external'][next_depot]):
      external_depot = next_depot
      break

  return external_depot
2016 def PrepareToBisectOnDepot(self
,
2022 """Changes to the appropriate directory and gathers a list of revisions
2023 to bisect between |start_revision| and |end_revision|.
2026 current_depot: The depot we want to bisect.
2027 end_revision: End of the revision range.
2028 start_revision: Start of the revision range.
2029 previous_depot: The depot we were previously bisecting.
2030 previous_revision: The last revision we synced to on |previous_depot|.
2033 A list containing the revisions between |start_revision| and
2034 |end_revision| inclusive.
2036 # Change into working directory of external library to run
2037 # subsequent commands.
2038 self
.ChangeToDepotWorkingDirectory(current_depot
)
2040 # V8 (and possibly others) is merged in periodically. Bisecting
2041 # this directory directly won't give much good info.
2042 if DEPOT_DEPS_NAME
[current_depot
].has_key('custom_deps'):
2043 config_path
= os
.path
.join(self
.src_cwd
, '..')
2044 if bisect_utils
.RunGClientAndCreateConfig(self
.opts
,
2045 DEPOT_DEPS_NAME
[current_depot
]['custom_deps'], cwd
=config_path
):
2047 if bisect_utils
.RunGClient(
2048 ['sync', '--revision', previous_revision
], cwd
=self
.src_cwd
):
2051 if current_depot
== 'v8_bleeding_edge':
2052 self
.ChangeToDepotWorkingDirectory('chromium')
2054 shutil
.move('v8', 'v8.bak')
2055 shutil
.move('v8_bleeding_edge', 'v8')
2057 self
.cleanup_commands
.append(['mv', 'v8', 'v8_bleeding_edge'])
2058 self
.cleanup_commands
.append(['mv', 'v8.bak', 'v8'])
2060 self
.depot_cwd
['v8_bleeding_edge'] = os
.path
.join(self
.src_cwd
, 'v8')
2061 self
.depot_cwd
['v8'] = os
.path
.join(self
.src_cwd
, 'v8.bak')
2063 self
.ChangeToDepotWorkingDirectory(current_depot
)
2065 depot_revision_list
= self
.GetRevisionList(current_depot
,
2069 self
.ChangeToDepotWorkingDirectory('chromium')
2071 return depot_revision_list
def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
  """Gathers reference values by running the performance tests on the
  known good and bad revisions.

  Args:
    good_rev: The last known good revision where the performance regression
      has not occurred yet.
    bad_rev: A revision where the performance regression has already occurred.
    cmd: The command to execute the performance test.
    metric: The metric being tested for regression.
    target_depot: The depot the revisions belong to.

  Returns:
    A tuple with the results of building and running each revision.
  """
  bad_run_results = self.SyncBuildAndRunRevision(bad_rev,
                                                 target_depot,
                                                 cmd,
                                                 metric)

  good_run_results = None

  # Only bother with the good revision if the bad one built and ran
  # successfully (element [1] is the run's status code).
  if not bad_run_results[1]:
    good_run_results = self.SyncBuildAndRunRevision(good_rev,
                                                    target_depot,
                                                    cmd,
                                                    metric)

  return (bad_run_results, good_run_results)
2102 def AddRevisionsIntoRevisionData(self
, revisions
, depot
, sort
, revision_data
):
2103 """Adds new revisions to the revision_data dict and initializes them.
2106 revisions: List of revisions to add.
2107 depot: Depot that's currently in use (src, webkit, etc...)
2108 sort: Sorting key for displaying revisions.
2109 revision_data: A dict to add the new revisions into. Existing revisions
2110 will have their sort keys offset.
2113 num_depot_revisions
= len(revisions
)
2115 for k
, v
in revision_data
.iteritems():
2116 if v
['sort'] > sort
:
2117 v
['sort'] += num_depot_revisions
2119 for i
in xrange(num_depot_revisions
):
2122 revision_data
[r
] = {'revision' : r
,
2128 'sort' : i
+ sort
+ 1}
def PrintRevisionsToBisectMessage(self, revision_list, depot):
  """Prints the list of revisions that will be bisected.

  When buildbot annotations are enabled, the listing is wrapped in an
  annotated step so it is collapsible in the buildbot log view.

  Args:
    revision_list: The revisions to display; the step label uses the last
        element as the range start and the first as the range end.
    depot: Name of the depot the revisions belong to.
  """
  if self.opts.output_buildbot_annotations:
    step_name = 'Bisection Range: [%s - %s]' % (
        revision_list[len(revision_list)-1], revision_list[0])
    bisect_utils.OutputAnnotationStepStart(step_name)

  print 'Revisions to bisect on [%s]:' % depot
  for revision_id in revision_list:
    print ' -> %s' % (revision_id, )

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepClosed()
def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
  """Checks to see if changes to DEPS file occurred, and that the revision
  range also includes the change to .DEPS.git. If it doesn't, attempts to
  expand the revision range to include it.

  Args:
    bad_revision: First known bad revision.
    good_revision: Last known good revision.

  Returns:
    A tuple with the new bad and good revisions.
  """
  if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
    changes_to_deps = self.source_control.QueryFileRevisionHistory(
        'DEPS', good_revision, bad_revision)

    if changes_to_deps:
      # DEPS file was changed, search from the oldest change to DEPS file to
      # bad_revision to see if there are matching .DEPS.git changes.
      oldest_deps_change = changes_to_deps[-1]
      changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
          bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)

      if len(changes_to_deps) != len(changes_to_gitdeps):
        # Grab the timestamp of the last DEPS change
        cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
        output = CheckRunGit(cmd)
        commit_time = int(output)

        # Try looking for a commit that touches the .DEPS.git file in the
        # next 15 minutes after the DEPS file change.
        cmd = ['log', '--format=%H', '-1',
            '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
            'origin/master', bisect_utils.FILE_DEPS_GIT]
        output = CheckRunGit(cmd)
        output = output.strip()
        if output:
          self.warnings.append('Detected change to DEPS and modified '
              'revision range to include change to .DEPS.git')
          return (output, good_revision)
        else:
          self.warnings.append('Detected change to DEPS but couldn\'t find '
              'matching change to .DEPS.git')
  return (bad_revision, good_revision)
def CheckIfRevisionsInProperOrder(self,
                                  target_depot,
                                  good_revision,
                                  bad_revision):
  """Checks that |good_revision| is an earlier revision than |bad_revision|.

  Args:
    target_depot: The depot the revisions belong to.
    good_revision: Number/tag of the known good revision.
    bad_revision: Number/tag of the known bad revision.

  Returns:
    True if the revisions are in the proper order (good earlier than bad).
  """
  if self.source_control.IsGit() and target_depot != 'cros':
    # Compare commit timestamps rather than hashes, since git SHA1s have
    # no inherent ordering.
    cmd = ['log', '--format=%ct', '-1', good_revision]
    cwd = self._GetDepotDirectory(target_depot)

    output = CheckRunGit(cmd, cwd=cwd)
    good_commit_time = int(output)

    cmd = ['log', '--format=%ct', '-1', bad_revision]
    output = CheckRunGit(cmd, cwd=cwd)
    bad_commit_time = int(output)

    return good_commit_time <= bad_commit_time
  else:
    # Cros/svn use integers
    return int(good_revision) <= int(bad_revision)
def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
  """Given known good and bad revisions, run a binary search on all
  intermediate revisions to determine the CL where the performance regression
  occurred.

  Args:
    command_to_run: Specify the command to execute the performance test.
    good_revision: Number/tag of the known good revision.
    bad_revision: Number/tag of the known bad revision.
    metric: The performance metric to monitor.

  Returns:
    A dict with 2 members, 'revision_data' and 'error'. On success,
    'revision_data' will contain a dict mapping revision ids to
    data about that revision. Each piece of revision data consists of a
    dict with the following keys:

    'passed': Represents whether the performance test was successful at
        that revision. Possible values include: 1 (passed), 0 (failed),
        '?' (skipped), 'F' (build failed).
    'depot': The depot that this revision is from (ie. WebKit)
    'external': If the revision is a 'src' revision, 'external' contains
        the revisions of each of the external libraries.
    'sort': A sort value for sorting the dict in order of commits.

    If an error occurred, the 'error' field will contain the message and
    'revision_data' will be empty.
  """
  # NOTE(review): this copy of the file reached review with lines elided.
  # Statements that were missing from the mangled source (loop headers,
  # early returns, some call arguments) were reconstructed and should be
  # confirmed against the upstream Chromium bisect script.
  results = {'revision_data' : {},
             'error' : None}

  # Choose depot to bisect first
  target_depot = 'chromium'
  if self.opts.target_platform == 'cros':
    target_depot = 'cros'
  elif self.opts.target_platform == 'android-chrome':
    target_depot = 'android-chrome'

  self.ChangeToDepotWorkingDirectory(target_depot)

  # If they passed SVN CL's, etc... we can try match them to git SHA1's.
  bad_revision = self.source_control.ResolveToRevision(bad_revision_in,
                                                       target_depot, 100)
  good_revision = self.source_control.ResolveToRevision(good_revision_in,
                                                        target_depot, -100)

  if bad_revision is None:
    results['error'] = 'Could\'t resolve [%s] to SHA1.' % (bad_revision_in,)
    return results

  if good_revision is None:
    results['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in,)
    return results

  # Check that they didn't accidentally swap good and bad revisions.
  if not self.CheckIfRevisionsInProperOrder(
      target_depot, good_revision, bad_revision):
    results['error'] = 'bad_revision < good_revision, did you swap these '\
        'by mistake?'
    return results

  (bad_revision, good_revision) = self.NudgeRevisionsIfDEPSChange(
      bad_revision, good_revision)

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepStart('Gathering Revisions')

  print 'Gathering revision range for bisection.'
  # Retrieve a list of revisions to do bisection on.
  src_revision_list = self.GetRevisionList(target_depot,
                                           bad_revision,
                                           good_revision)

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepClosed()

  if src_revision_list:
    # revision_data will store information about a revision such as the
    # depot it came from, the webkit/V8 revision at that time,
    # performance timing, build state, etc...
    revision_data = results['revision_data']

    # revision_list is the list we're binary searching through at the moment.
    revision_list = []
    sort_key_ids = 0

    for current_revision_id in src_revision_list:
      sort_key_ids += 1
      revision_data[current_revision_id] = {'value' : None,
                                            'passed' : '?',
                                            'depot' : target_depot,
                                            'external' : None,
                                            'perf_time' : 0,
                                            'build_time' : 0,
                                            'sort' : sort_key_ids}
      revision_list.append(current_revision_id)

    min_revision = 0
    max_revision = len(revision_list) - 1

    self.PrintRevisionsToBisectMessage(revision_list, target_depot)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')

    print 'Gathering reference values for bisection.'

    # Perform the performance tests on the good and bad revisions, to get
    # reference values to classify every intermediate revision against.
    (bad_results, good_results) = self.GatherReferenceValues(good_revision,
                                                             bad_revision,
                                                             command_to_run,
                                                             metric,
                                                             target_depot)

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()

    if bad_results[1]:
      results['error'] = ('An error occurred while building and running '
          'the \'bad\' reference value. The bisect cannot continue without '
          'a working \'bad\' revision to start from.\n\nError: %s' %
          bad_results[0])
      return results

    if good_results[1]:
      results['error'] = ('An error occurred while building and running '
          'the \'good\' reference value. The bisect cannot continue without '
          'a working \'good\' revision to start from.\n\nError: %s' %
          good_results[0])
      return results

    # We need these reference values to determine if later runs should be
    # classified as pass or fail.
    known_bad_value = bad_results[0]
    known_good_value = good_results[0]

    # Can just mark the good and bad revisions explicitly here since we
    # already know the results.
    bad_revision_data = revision_data[revision_list[0]]
    bad_revision_data['external'] = bad_results[2]
    bad_revision_data['perf_time'] = bad_results[3]
    bad_revision_data['build_time'] = bad_results[4]
    bad_revision_data['passed'] = False
    bad_revision_data['value'] = known_bad_value

    good_revision_data = revision_data[revision_list[max_revision]]
    good_revision_data['external'] = good_results[2]
    good_revision_data['perf_time'] = good_results[3]
    good_revision_data['build_time'] = good_results[4]
    good_revision_data['passed'] = True
    good_revision_data['value'] = known_good_value

    next_revision_depot = target_depot

    # Main binary-search loop: narrow [min_revision, max_revision] until the
    # suspect range is a single commit, possibly descending into external
    # depots (WebKit/V8/etc.) when the boundary falls on a DEPS roll.
    while True:
      if not revision_list:
        break

      min_revision_data = revision_data[revision_list[min_revision]]
      max_revision_data = revision_data[revision_list[max_revision]]

      if max_revision - min_revision <= 1:
        current_depot = min_revision_data['depot']
        if min_revision_data['passed'] == '?':
          next_revision_index = min_revision
        elif max_revision_data['passed'] == '?':
          next_revision_index = max_revision
        elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
          previous_revision = revision_list[min_revision]
          # If there were changes to any of the external libraries we track,
          # should bisect the changes there as well.
          external_depot = self._FindNextDepotToBisect(current_depot,
              previous_revision, min_revision_data, max_revision_data)

          # If there was no change in any of the external depots, the search
          # is over.
          if not external_depot:
            if current_depot == 'v8':
              self.warnings.append('Unfortunately, V8 bisection couldn\'t '
                  'continue any further. The script can only bisect into '
                  'V8\'s bleeding_edge repository if both the current and '
                  'previous revisions in trunk map directly to revisions in '
                  'bleeding_edge.')
            break

          earliest_revision = max_revision_data['external'][external_depot]
          latest_revision = min_revision_data['external'][external_depot]

          new_revision_list = self.PrepareToBisectOnDepot(external_depot,
                                                          latest_revision,
                                                          earliest_revision,
                                                          next_revision_depot,
                                                          previous_revision)

          if not new_revision_list:
            results['error'] = 'An error occurred attempting to retrieve'\
                               ' revision range: [%s..%s]' %\
                               (earliest_revision, latest_revision)
            return results

          self.AddRevisionsIntoRevisionData(new_revision_list,
                                            external_depot,
                                            min_revision_data['sort'],
                                            revision_data)

          # Reset the bisection and perform it on the newly inserted
          # changelists.
          revision_list = new_revision_list
          min_revision = 0
          max_revision = len(revision_list) - 1
          sort_key_ids += len(revision_list)

          print 'Regression in metric:%s appears to be the result of changes'\
                ' in [%s].' % (metric, external_depot)

          self.PrintRevisionsToBisectMessage(revision_list, external_depot)

          continue
        else:
          break
      else:
        next_revision_index = int((max_revision - min_revision) / 2) +\
            min_revision

      next_revision_id = revision_list[next_revision_index]
      next_revision_data = revision_data[next_revision_id]
      next_revision_depot = next_revision_data['depot']

      self.ChangeToDepotWorkingDirectory(next_revision_depot)

      if self.opts.output_buildbot_annotations:
        step_name = 'Working on [%s]' % next_revision_id
        bisect_utils.OutputAnnotationStepStart(step_name)

      print 'Working on revision: [%s]' % next_revision_id

      run_results = self.SyncBuildAndRunRevision(next_revision_id,
                                                 next_revision_depot,
                                                 command_to_run,
                                                 metric, skippable=True)

      # If the build is successful, check whether or not the metric
      # had regressed.
      if not run_results[1]:
        if len(run_results) > 2:
          next_revision_data['external'] = run_results[2]
          next_revision_data['perf_time'] = run_results[3]
          next_revision_data['build_time'] = run_results[4]

        passed_regression = self.CheckIfRunPassed(run_results[0],
                                                  known_good_value,
                                                  known_bad_value)

        next_revision_data['passed'] = passed_regression
        next_revision_data['value'] = run_results[0]

        if passed_regression:
          max_revision = next_revision_index
        else:
          min_revision = next_revision_index
      else:
        if run_results[1] == BUILD_RESULT_SKIPPED:
          next_revision_data['passed'] = 'Skipped'
        elif run_results[1] == BUILD_RESULT_FAIL:
          next_revision_data['passed'] = 'Build Failed'

        print run_results[0]

        # If the build is broken, remove it and redo search.
        revision_list.pop(next_revision_index)
        max_revision -= 1

      if self.opts.output_buildbot_annotations:
        self._PrintPartialResults(results)
        bisect_utils.OutputAnnotationStepClosed()
  else:
    # Weren't able to sync and retrieve the revision range.
    results['error'] = 'An error occurred attempting to retrieve revision '\
                       'range: [%s..%s]' % (good_revision, bad_revision)

  return results
def _PrintPartialResults(self, results_dict):
  """Prints a partial results table for the commits tested so far.

  Args:
    results_dict: A dict with a 'revision_data' member mapping revision ids
        to per-revision result dicts (see Run() for the schema).
  """
  revision_data = results_dict['revision_data']
  revision_data_sorted = sorted(revision_data.iteritems(),
                                key = lambda x: x[1]['sort'])
  results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
  first_working_revision = results_dict['first_working_revision']
  last_broken_revision = results_dict['last_broken_revision']

  # Fix: the two locals above were previously assigned and then ignored —
  # the call below re-read the dict instead. Pass the locals so the values
  # are looked up once and the intent is clear.
  self._PrintTestedCommitsTable(revision_data_sorted,
                                first_working_revision,
                                last_broken_revision,
                                100, final_step=False)
2538 def _PrintConfidence(self
, results_dict
):
2539 # The perf dashboard specifically looks for the string
2540 # "Confidence in Bisection Results: 100%" to decide whether or not
2541 # to cc the author(s). If you change this, please update the perf
2542 # dashboard as well.
2543 print 'Confidence in Bisection Results: %d%%' % results_dict
['confidence']
def _PrintBanner(self, results_dict):
  """Prints the success banner for a reproduced regression.

  Args:
    results_dict: A results dict containing 'regression_size',
        'regression_std_err' and 'confidence' entries.
  """
  # NOTE(review): the bare blank-line prints were elided from this copy;
  # reconstructed from the visible surrounding lines — confirm spacing
  # against upstream.
  print
  print " __o_\___ Aw Snap! We hit a speed bump!"
  print "=-O----O-'__.~.___________________________________"
  print
  print 'Bisect reproduced a %.02f%% (+-%.02f%%) change in the %s metric.' % (
      results_dict['regression_size'], results_dict['regression_std_err'],
      '/'.join(self.opts.metric))
  self._PrintConfidence(results_dict)
def _PrintFailedBanner(self, results_dict):
  """Prints the banner shown when no metric change could be reproduced.

  Args:
    results_dict: A results dict containing a 'confidence' entry.
  """
  print
  print ('Bisect could not reproduce a change in the '
      '%s/%s metric.' % (self.opts.metric[0], self.opts.metric[1]))
  self._PrintConfidence(results_dict)
def _GetViewVCLinkFromDepotAndHash(self, cl, depot):
  """Gets a link to a human-readable change viewer (viewvc) for a commit.

  The svn revision is parsed out of the commit message's git-svn-id line
  and appended to the depot's 'viewvc' URL prefix from DEPOT_DEPS_NAME.

  Args:
    cl: The git hash of the commit.
    depot: The depot name the commit belongs to.

  Returns:
    The viewvc URL string, or '' if no link could be constructed.
  """
  info = self.source_control.QueryRevisionInfo(cl,
      self._GetDepotDirectory(depot))
  # 'in' instead of the Python-2-only dict.has_key().
  if depot and 'viewvc' in DEPOT_DEPS_NAME[depot]:
    try:
      # Format is "git-svn-id: svn://....@123456 <other data>"
      svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i]
      svn_revision = svn_line[0].split('@')
      svn_revision = svn_revision[1].split(' ')[0]
      return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision
    except IndexError:
      # Commit message has no parseable git-svn-id line; previously
      # svn_line[0] raised an unguarded IndexError here.
      return ''
  return ''
def _PrintRevisionInfo(self, cl, info, depot=None):
  """Prints commit subject/author/link/date details for one culprit CL.

  Args:
    cl: The commit hash.
    info: Revision info dict with 'subject', 'author', 'email', 'body' and
        'date' entries (as returned by source_control.QueryRevisionInfo).
    depot: Optional depot name, used to build the viewvc link.
  """
  # The perf dashboard specifically looks for the string
  # "Author  : " to parse out who to cc on a bug. If you change the
  # formatting here, please update the perf dashboard as well.
  # NOTE(review): the branch structure around commit_link and the bare
  # prints were partially elided from this copy — confirm against upstream.
  print
  print 'Subject : %s' % info['subject']
  print 'Author  : %s' % info['author']
  if not info['email'].startswith(info['author']):
    print 'Email   : %s' % info['email']
  commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
  if commit_link:
    print 'Link    : %s' % commit_link
  else:
    print
    print 'Failed to parse svn revision from body:'
    print
    print info['body']
    print
  print 'Commit  : %s' % cl
  print 'Date    : %s' % info['date']
def _PrintTestedCommitsTable(self, revision_data_sorted,
    first_working_revision, last_broken_revision, confidence,
    final_step=True):
  """Prints a table of every tested commit with its mean, error and state.

  Args:
    revision_data_sorted: List of (revision_id, revision_data) tuples sorted
        by commit order.
    first_working_revision: Id of the first revision that passed.
    last_broken_revision: Id of the last revision that failed.
    confidence: Confidence percentage; 0 suppresses good/bad labels.
    final_step: True for the final results table, False for partial results.
  """
  # NOTE(review): the state-tracking statements (state counter init and
  # increments, blank-line prints) were elided from this copy and have been
  # reconstructed — confirm against upstream.
  print
  if final_step:
    print 'Tested commits:'
  else:
    print 'Partial results:'
  print '  %20s  %70s  %12s %14s %13s' % ('Depot'.center(20, ' '),
      'Commit SHA'.center(70, ' '), 'Mean'.center(12, ' '),
      'Std. Error'.center(14, ' '), 'State'.center(13, ' '))
  state = 0
  for current_id, current_data in revision_data_sorted:
    if current_data['value']:
      if (current_id == last_broken_revision or
          current_id == first_working_revision):
        # If confidence is too low, don't add this empty line since it's
        # used to put focus on a suspected CL.
        if confidence and final_step:
          print
        state += 1
        if state == 2 and not final_step:
          # Just want a separation between "bad" and "good" cl's.
          print

      state_str = 'Bad'
      if state == 1 and final_step:
        state_str = 'Suspected CL'
      elif state == 2:
        state_str = 'Good'

      # If confidence is too low, don't bother outputting good/bad.
      if not confidence:
        state_str = ''
      state_str = state_str.center(13, ' ')

      std_error = ('+-%.02f' %
          current_data['value']['std_err']).center(14, ' ')
      mean = ('%.02f' % current_data['value']['mean']).center(12, ' ')
      cl_link = self._GetViewVCLinkFromDepotAndHash(current_id,
          current_data['depot'])
      if not cl_link:
        cl_link = current_id
      print '  %20s  %70s  %12s %14s %13s' % (
          current_data['depot'].center(20, ' '), cl_link.center(70, ' '),
          mean, std_error, state_str)
def _PrintReproSteps(self):
  """Prints the command a developer can run to reproduce the test locally."""
  print
  print 'To reproduce locally:'
  print '$ ' + self.opts.command
  # Telemetry-based tests support an extra profiler flag worth advertising.
  if bisect_utils.IsTelemetryCommand(self.opts.command):
    print
    print 'Also consider passing --profiler=list to see available profilers.'
def _PrintOtherRegressions(self, other_regressions, revision_data):
  """Prints a table of other possible regression ranges found in the data.

  Args:
    other_regressions: List of [current_id, previous_id, confidence] entries
        as produced by _FindOtherRegressions.
    revision_data: Dict mapping revision ids to per-revision data.
  """
  print
  print 'Other regressions may have occurred:'
  print '  %8s  %70s  %10s' % ('Depot'.center(8, ' '),
      'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
  for regression in other_regressions:
    current_id, previous_id, confidence = regression
    current_data = revision_data[current_id]
    previous_data = revision_data[previous_id]

    current_link = self._GetViewVCLinkFromDepotAndHash(current_id,
        current_data['depot'])
    previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id,
        previous_data['depot'])

    # If we can't map it to a viewable URL, at least show the original hash.
    if not current_link:
      current_link = current_id
    if not previous_link:
      previous_link = previous_id

    print '  %8s  %70s  %s' % (
        current_data['depot'], current_link,
        ('%d%%' % confidence).center(10, ' '))
    print '  %8s  %70s' % (
        previous_data['depot'], previous_link)
  print
def _PrintStepTime(self, revision_data_sorted):
  """Prints the average build and test times over all tested revisions.

  Args:
    revision_data_sorted: List of (revision_id, revision_data) tuples sorted
        by commit order; only entries with a 'value' are counted.
  """
  # NOTE(review): the step_count initialization/increment and the guard
  # around the division were elided from this copy and have been
  # reconstructed — confirm against upstream.
  step_perf_time_avg = 0.0
  step_build_time_avg = 0.0
  step_count = 0.0
  for _, current_data in revision_data_sorted:
    if current_data['value']:
      step_perf_time_avg += current_data['perf_time']
      step_build_time_avg += current_data['build_time']
      step_count += 1
  if step_count:
    step_perf_time_avg = step_perf_time_avg / step_count
    step_build_time_avg = step_build_time_avg / step_count
  print
  print 'Average build time : %s' % datetime.timedelta(
      seconds=int(step_build_time_avg))
  print 'Average test time  : %s' % datetime.timedelta(
      seconds=int(step_perf_time_avg))
def _PrintWarnings(self):
  """Prints any accumulated warnings, de-duplicated, at the end of a run."""
  if not self.warnings:
    return
  print
  print 'WARNINGS:'
  # set() de-duplicates repeated warnings gathered during the run.
  for w in set(self.warnings):
    print '  !!! %s' % w
def _FindOtherRegressions(self, revision_data_sorted, bad_greater_than_good):
  """Scans the tested revisions for additional high-confidence regressions.

  Args:
    revision_data_sorted: List of (revision_id, revision_data) tuples sorted
        by commit order.
    bad_greater_than_good: True if the overall regression increased the
        metric value; used to keep only same-direction candidates.

  Returns:
    A list of [current_id, previous_id, confidence] entries for each
    candidate regression with confidence > 50.
  """
  # NOTE(review): the previous_id initialization and the guards around
  # current_values/previous_values were elided from this copy and have been
  # reconstructed — confirm against upstream.
  other_regressions = []
  previous_values = []
  previous_id = None
  for current_id, current_data in revision_data_sorted:
    current_values = current_data['value']
    if current_values:
      current_values = current_values['values']
      if previous_values:
        confidence = self._CalculateConfidence(previous_values,
            [current_values])
        mean_of_prev_runs = CalculateTruncatedMean(
            sum(previous_values, []), 0)
        mean_of_current_runs = CalculateTruncatedMean(current_values, 0)

        # Check that the potential regression is in the same direction as
        # the overall regression. If the mean of the previous runs < the
        # mean of the current runs, this local regression is in same
        # direction.
        prev_less_than_current = mean_of_prev_runs < mean_of_current_runs
        is_same_direction = (prev_less_than_current if
            bad_greater_than_good else not prev_less_than_current)

        # Only report potential regressions with high confidence.
        if is_same_direction and confidence > 50:
          other_regressions.append([current_id, previous_id, confidence])
      previous_values.append(current_values)
      previous_id = current_id
  return other_regressions
def _CalculateConfidence(self, working_means, broken_means):
  """Calculates a confidence percentage that the bisect narrowed correctly.

  Confidence is the distance between the truncated-mean ranges of the
  'working' and 'broken' groups, normalized by the sum of the groups'
  standard deviations, clamped and scaled to [0, 100].

  Args:
    working_means: List of value lists from revisions that passed.
    broken_means: List of value lists from revisions that failed.

  Returns:
    An integer confidence percentage in [0, 100].
  """
  bounds_working = None
  bounds_broken = None
  for m in working_means:
    current_mean = CalculateTruncatedMean(m, 0)
    if bounds_working:
      bounds_working[0] = min(current_mean, bounds_working[0])
      # Fix: the upper bound must be compared against the previous upper
      # bound; previously this read bounds_working[0] (the lower bound),
      # so the max never tracked correctly.
      bounds_working[1] = max(current_mean, bounds_working[1])
    else:
      bounds_working = [current_mean, current_mean]
  for m in broken_means:
    current_mean = CalculateTruncatedMean(m, 0)
    if bounds_broken:
      bounds_broken[0] = min(current_mean, bounds_broken[0])
      # Fix: same lower-bound/upper-bound mix-up as above.
      bounds_broken[1] = max(current_mean, bounds_broken[1])
    else:
      bounds_broken = [current_mean, current_mean]
  dist_between_groups = min(
      math.fabs(bounds_broken[1] - bounds_working[0]),
      math.fabs(bounds_broken[0] - bounds_working[1]))
  working_mean = sum(working_means, [])
  broken_mean = sum(broken_means, [])
  len_working_group = CalculateStandardDeviation(working_mean)
  len_broken_group = CalculateStandardDeviation(broken_mean)

  confidence = (dist_between_groups / (
      max(0.0001, (len_broken_group + len_working_group))))
  confidence = int(min(1.0, max(confidence, 0.0)) * 100.0)
  return confidence
def _GetResultsDict(self, revision_data, revision_data_sorted):
  """Aggregates raw per-revision results into a summary results dict.

  Args:
    revision_data: Dict mapping revision ids to per-revision data.
    revision_data_sorted: The same data as (id, data) tuples sorted by
        commit order.

  Returns:
    A dict with 'first_working_revision', 'last_broken_revision',
    'culprit_revisions', 'other_regressions', 'regression_size',
    'regression_std_err' and 'confidence' entries.
  """
  # NOTE(review): this copy of the file reached review with many lines
  # elided (default initializations, loop guards, the cros parsing
  # scaffolding, warning text). Reconstructed portions should be confirmed
  # against the upstream Chromium bisect script.
  # Find range where it possibly broke.
  first_working_revision = None
  first_working_revision_index = -1
  last_broken_revision = None
  last_broken_revision_index = -1

  for i in xrange(len(revision_data_sorted)):
    k, v = revision_data_sorted[i]
    if v['passed'] == 1:
      if not first_working_revision:
        first_working_revision = k
        first_working_revision_index = i

    if not v['passed']:
      last_broken_revision = k
      last_broken_revision_index = i

  # Defaults used when no broken/working boundary was found.
  culprit_revisions = []
  other_regressions = []
  regression_size = 0
  regression_std_err = 0
  confidence = 0

  if last_broken_revision != None and first_working_revision != None:
    broken_means = []
    for i in xrange(0, last_broken_revision_index + 1):
      if revision_data_sorted[i][1]['value']:
        broken_means.append(revision_data_sorted[i][1]['value']['values'])

    working_means = []
    for i in xrange(first_working_revision_index, len(revision_data_sorted)):
      if revision_data_sorted[i][1]['value']:
        working_means.append(revision_data_sorted[i][1]['value']['values'])

    # Flatten the lists to calculate mean of all values.
    working_mean = sum(working_means, [])
    broken_mean = sum(broken_means, [])

    # Calculate the approximate size of the regression
    mean_of_bad_runs = CalculateTruncatedMean(broken_mean, 0.0)
    mean_of_good_runs = CalculateTruncatedMean(working_mean, 0.0)

    regression_size = math.fabs(max(mean_of_good_runs, mean_of_bad_runs) /
        max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0 - 100.0

    regression_std_err = math.fabs(CalculatePooledStandardError(
        [working_mean, broken_mean]) /
        max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0

    # Give a "confidence" in the bisect. At the moment we use how distinct the
    # values are before and after the last broken revision, and how noisy the
    # overall graph is.
    confidence = self._CalculateConfidence(working_means, broken_means)

    culprit_revisions = []

    cwd = os.getcwd()
    self.ChangeToDepotWorkingDirectory(
        revision_data[last_broken_revision]['depot'])

    if revision_data[last_broken_revision]['depot'] == 'cros':
      # Want to get a list of all the commits and what depots they belong
      # to so that we can grab info about each.
      cmd = ['repo', 'forall', '-c',
          'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
          last_broken_revision, first_working_revision + 1)]
      (output, return_code) = RunProcessAndRetrieveOutput(cmd)

      changes = []
      assert not return_code, 'An error occurred while running'\
                              ' "%s"' % ' '.join(cmd)
      last_depot = None
      for l in output.split('\n'):
        if l:
          # Output will be in form:
          # /path_to_depot
          # /path_to_other_depot
          # <SHA1>
          # /path_again
          # <SHA1>
          # etc.
          if l[0] == '/':
            last_depot = l
          else:
            contents = l.split(' ')
            if len(contents) > 1:
              changes.append([last_depot, contents[0]])
      for c in changes:
        info = self.source_control.QueryRevisionInfo(c[1])
        culprit_revisions.append((c[1], info, None))
    else:
      for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
        k, v = revision_data_sorted[i]
        if k == first_working_revision:
          break
        self.ChangeToDepotWorkingDirectory(v['depot'])
        info = self.source_control.QueryRevisionInfo(k)
        culprit_revisions.append((k, info, v['depot']))
    os.chdir(cwd)

    # Check for any other possible regression ranges
    other_regressions = self._FindOtherRegressions(revision_data_sorted,
        mean_of_bad_runs > mean_of_good_runs)

    # Check for warnings:
    if len(culprit_revisions) > 1:
      self.warnings.append('Due to build errors, regression range could '
          'not be narrowed down to a single commit.')
    if self.opts.repeat_test_count == 1:
      self.warnings.append('Tests were only set to run once. This may '
          'be insufficient to get meaningful results.')
    if confidence < 100:
      if confidence:
        self.warnings.append(
            'Confidence is less than 100%. There could be other candidates for '
            'this regression. Try bisecting again with increased repeat_count '
            'or on a sub-metric that shows the regression more clearly.')
      else:
        self.warnings.append(
            'Confidence is 0%. Try bisecting again on another platform, with '
            'increased repeat_count or on a sub-metric that shows the regression '
            'more clearly.')

  return {
      'first_working_revision': first_working_revision,
      'last_broken_revision': last_broken_revision,
      'culprit_revisions': culprit_revisions,
      'other_regressions': other_regressions,
      'regression_size': regression_size,
      'regression_std_err': regression_std_err,
      'confidence': confidence,
      }
def FormatAndPrintResults(self, bisect_results):
  """Prints the results from a bisection run in a readable format.

  Args:
    bisect_results: The results from a bisection test run.
  """
  # NOTE(review): branch structure and blank-line prints partially elided
  # from this copy; reconstructed — confirm against upstream.
  revision_data = bisect_results['revision_data']
  revision_data_sorted = sorted(revision_data.iteritems(),
                                key = lambda x: x[1]['sort'])
  results_dict = self._GetResultsDict(revision_data, revision_data_sorted)

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')

  print
  print 'Full results of bisection:'
  for current_id, current_data in revision_data_sorted:
    build_status = current_data['passed']

    # 'passed' can be a bool (explicitly classified) or a status string
    # such as '?', 'Skipped' or 'Build Failed'; only translate bools.
    if type(build_status) is bool:
      if build_status:
        build_status = 'Good'
      else:
        build_status = 'Bad'

    print '  %20s  %40s  %s' % (current_data['depot'],
                                current_id, build_status)
  print

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepClosed()
    # The perf dashboard scrapes the "results" step in order to comment on
    # bugs. If you change this, please update the perf dashboard as well.
    bisect_utils.OutputAnnotationStepStart('Results')

  if results_dict['culprit_revisions'] and results_dict['confidence']:
    self._PrintBanner(results_dict)
    for culprit in results_dict['culprit_revisions']:
      cl, info, depot = culprit
      self._PrintRevisionInfo(cl, info, depot)
    self._PrintReproSteps()
    if results_dict['other_regressions']:
      self._PrintOtherRegressions(results_dict['other_regressions'],
                                  revision_data)
  else:
    self._PrintFailedBanner(results_dict)
    self._PrintReproSteps()

  self._PrintTestedCommitsTable(revision_data_sorted,
                                results_dict['first_working_revision'],
                                results_dict['last_broken_revision'],
                                results_dict['confidence'])
  self._PrintStepTime(revision_data_sorted)
  self._PrintWarnings()

  if self.opts.output_buildbot_annotations:
    bisect_utils.OutputAnnotationStepClosed()
def DetermineAndCreateSourceControl(opts):
  """Attempts to determine the underlying source control workflow and returns
  a SourceControl object.

  Returns:
    An instance of a SourceControl object, or None if the current workflow
    is unsupported.
  """
  # Probe whether we are inside a git work tree; only the git workflow is
  # supported at the moment.
  (output, _) = RunGit(['rev-parse', '--is-inside-work-tree'])

  if output.strip() == 'true':
    return GitSourceControl(opts)

  return None
def IsPlatformSupported(opts):
  """Checks that this platform and build system are supported.

  Args:
    opts: The options parsed from the command line.

  Returns:
    True if the platform and build system are supported.
  """
  # Haven't tested the script out on any other platforms yet.
  return os.name in ('posix', 'nt')
2985 def RmTreeAndMkDir(path_to_dir
, skip_makedir
=False):
2986 """Removes the directory tree specified, and then creates an empty
2987 directory in the same location (if not specified to skip).
2990 path_to_dir: Path to the directory tree.
2991 skip_makedir: Whether to skip creating empty directory, default is False.
2994 True if successful, False if an error occurred.
2997 if os
.path
.exists(path_to_dir
):
2998 shutil
.rmtree(path_to_dir
)
3000 if e
.errno
!= errno
.ENOENT
:
3003 if not skip_makedir
:
3004 return MaybeMakeDirectory(path_to_dir
)
def RemoveBuildFiles():
  """Removes build files from previous runs."""
  release_out_dir = os.path.join('out', 'Release')
  release_build_dir = os.path.join('build', 'Release')
  # Both directories must be cleared successfully; short-circuits on the
  # first failure, matching the original nested-if behavior.
  if RmTreeAndMkDir(release_out_dir) and RmTreeAndMkDir(release_build_dir):
    return True
  return False
class BisectOptions(object):
  """Options to be used when running bisection."""

  # NOTE(review): this copy of the file reached review with lines elided.
  # In particular several instance-attribute defaults and add_option()
  # keyword arguments (type=, default=, dest=) are missing and have not
  # been invented here — confirm against the upstream Chromium script.

  def __init__(self):
    super(BisectOptions, self).__init__()

    # Defaults; overwritten by ParseCommandLine() or FromDict().
    self.target_platform = 'chromium'
    self.build_preference = None
    self.good_revision = None
    self.bad_revision = None
    self.use_goma = None
    self.cros_board = None
    self.cros_remote_ip = None
    self.repeat_test_count = 20
    self.truncate_percent = 25
    self.max_time_minutes = 20
    self.output_buildbot_annotations = None
    self.no_custom_deps = False
    self.working_directory = None
    self.extra_src = None
    self.debug_ignore_build = None
    self.debug_ignore_sync = None
    self.debug_ignore_perf_test = None
    self.gs_bucket = None
    self.target_arch = 'ia32'

  def _CreateCommandLineParser(self):
    """Creates a parser with bisect options.

    Returns:
      An instance of optparse.OptionParser.
    """
    usage = ('%prog [options] [-- chromium-options]\n'
             'Perform binary search on revision history to find a minimal '
             'range of revisions where a peformance metric regressed.\n')

    parser = optparse.OptionParser(usage=usage)

    group = optparse.OptionGroup(parser, 'Bisect options')
    group.add_option('-c', '--command',
                     help='A command to execute your performance test at' +
                     ' each point in the bisection.')
    group.add_option('-b', '--bad_revision',
                     help='A bad revision to start bisection. ' +
                     'Must be later than good revision. May be either a git' +
                     ' or svn revision.')
    group.add_option('-g', '--good_revision',
                     help='A revision to start bisection where performance' +
                     ' test is known to pass. Must be earlier than the ' +
                     'bad revision. May be either a git or svn revision.')
    group.add_option('-m', '--metric',
                     help='The desired metric to bisect on. For example ' +
                     '"vm_rss_final_b/vm_rss_f_b"')
    group.add_option('-r', '--repeat_test_count',
                     help='The number of times to repeat the performance '
                     'test. Values will be clamped to range [1, 100]. '
                     'Default value is 20.')
    group.add_option('--max_time_minutes',
                     help='The maximum time (in minutes) to take running the '
                     'performance tests. The script will run the performance '
                     'tests according to --repeat_test_count, so long as it '
                     'doesn\'t exceed --max_time_minutes. Values will be '
                     'clamped to range [1, 60].'
                     'Default value is 20.')
    group.add_option('-t', '--truncate_percent',
                     help='The highest/lowest % are discarded to form a '
                     'truncated mean. Values will be clamped to range [0, '
                     '25]. Default value is 25 (highest/lowest 25% will be '
                     'discarded).')
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Build options')
    group.add_option('-w', '--working_directory',
                     help='Path to the working directory where the script '
                     'will do an initial checkout of the chromium depot. The '
                     'files will be placed in a subdirectory "bisect" under '
                     'working_directory and that will be used to perform the '
                     'bisection. This parameter is optional, if it is not '
                     'supplied, the script will work from the current depot.')
    group.add_option('--build_preference',
                     choices=['msvs', 'ninja', 'make'],
                     help='The preferred build system to use. On linux/mac '
                     'the options are make/ninja. On Windows, the options '
                     'are msvs/ninja.')
    group.add_option('--target_platform',
                     choices=['chromium', 'cros', 'android', 'android-chrome'],
                     help='The target platform. Choices are "chromium" '
                     '(current platform), "cros", or "android". If you '
                     'specify something other than "chromium", you must be '
                     'properly set up to build that platform.')
    group.add_option('--no_custom_deps',
                     dest='no_custom_deps',
                     action="store_true",
                     help='Run the script with custom_deps or not.')
    group.add_option('--extra_src',
                     help='Path to a script which can be used to modify '
                     'the bisect script\'s behavior.')
    group.add_option('--cros_board',
                     help='The cros board type to build.')
    group.add_option('--cros_remote_ip',
                     help='The remote machine to image to.')
    group.add_option('--use_goma',
                     action="store_true",
                     help='Add a bunch of extra threads for goma.')
    group.add_option('--output_buildbot_annotations',
                     action="store_true",
                     help='Add extra annotation output for buildbot.')
    group.add_option('--gs_bucket',
                     help=('Name of Google Storage bucket to upload or '
                     'download build. e.g., chrome-perf'))
    group.add_option('--target_arch',
                     choices=['ia32', 'x64', 'arm'],
                     help=('The target build architecture. Choices are "ia32" '
                     '(default), "x64" or "arm".'))
    parser.add_option_group(group)

    group = optparse.OptionGroup(parser, 'Debug options')
    group.add_option('--debug_ignore_build',
                     action="store_true",
                     help='DEBUG: Don\'t perform builds.')
    group.add_option('--debug_ignore_sync',
                     action="store_true",
                     help='DEBUG: Don\'t perform syncs.')
    group.add_option('--debug_ignore_perf_test',
                     action="store_true",
                     help='DEBUG: Don\'t perform performance tests.')
    parser.add_option_group(group)

    return parser

  def ParseCommandLine(self):
    """Parses the command line for bisect options."""
    parser = self._CreateCommandLineParser()
    (opts, args) = parser.parse_args()

    try:
      if not opts.command:
        raise RuntimeError('missing required parameter: --command')

      if not opts.good_revision:
        raise RuntimeError('missing required parameter: --good_revision')

      if not opts.bad_revision:
        raise RuntimeError('missing required parameter: --bad_revision')

      if not opts.metric:
        raise RuntimeError('missing required parameter: --metric')

      if opts.gs_bucket:
        if not cloud_storage.List(opts.gs_bucket):
          # NOTE(review): 'e' is not bound here and RuntimeError gets a
          # second positional arg instead of %-formatting — looks like a
          # latent bug in the original; confirm against upstream.
          raise RuntimeError('Invalid Google Storage URL: [%s]', e)

      if opts.target_platform == 'cros':
        # Run sudo up front to make sure credentials are cached for later.
        print 'Sudo is required to build cros:'
        print
        RunProcess(['sudo', 'true'])

        if not opts.cros_board:
          raise RuntimeError('missing required parameter: --cros_board')

        if not opts.cros_remote_ip:
          raise RuntimeError('missing required parameter: --cros_remote_ip')

        if not opts.working_directory:
          raise RuntimeError('missing required parameter: --working_directory')

      metric_values = opts.metric.split('/')
      if len(metric_values) != 2:
        raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)

      opts.metric = metric_values
      # Clamp the tuning knobs to sane ranges; truncate_percent becomes a
      # fraction in [0, 0.25].
      opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
      opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
      opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
      opts.truncate_percent = opts.truncate_percent / 100.0

      # Copy every parsed option onto this instance, verifying each one is
      # a known attribute.
      for k, v in opts.__dict__.iteritems():
        assert hasattr(self, k), "Invalid %s attribute in BisectOptions." % k
        setattr(self, k, v)
    except RuntimeError, e:
      output_string = StringIO.StringIO()
      parser.print_help(file=output_string)
      error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
      output_string.close()
      raise RuntimeError(error_message)

  @staticmethod
  def FromDict(values):
    """Creates an instance of BisectOptions with the values parsed from a
    .cfg file.

    Args:
      values: a dict containing options to set.

    Returns:
      An instance of BisectOptions.
    """
    opts = BisectOptions()
    for k, v in values.iteritems():
      assert hasattr(opts, k), 'Invalid %s attribute in '\
          'BisectOptions.' % k
      setattr(opts, k, v)

    metric_values = opts.metric.split('/')
    if len(metric_values) != 2:
      raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)

    opts.metric = metric_values
    # Same clamping as ParseCommandLine().
    opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
    opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
    opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
    opts.truncate_percent = opts.truncate_percent / 100.0

    return opts
3264 opts
= BisectOptions()
3265 parse_results
= opts
.ParseCommandLine()
3268 extra_src
= bisect_utils
.LoadExtraSrc(opts
.extra_src
)
3270 raise RuntimeError("Invalid or missing --extra_src.")
3271 _AddAdditionalDepotInfo(extra_src
.GetAdditionalDepotInfo())
3273 if opts
.working_directory
:
3274 custom_deps
= bisect_utils
.DEFAULT_GCLIENT_CUSTOM_DEPS
3275 if opts
.no_custom_deps
:
3277 bisect_utils
.CreateBisectDirectoryAndSetupDepot(opts
, custom_deps
)
3279 os
.chdir(os
.path
.join(os
.getcwd(), 'src'))
3281 if not RemoveBuildFiles():
3282 raise RuntimeError('Something went wrong removing the build files.')
3284 if not IsPlatformSupported(opts
):
3285 raise RuntimeError("Sorry, this platform isn't supported yet.")
3287 # Check what source control method they're using. Only support git workflow
3289 source_control
= DetermineAndCreateSourceControl(opts
)
3291 if not source_control
:
3292 raise RuntimeError("Sorry, only the git workflow is supported at the "
3295 # gClient sync seems to fail if you're not in master branch.
3296 if (not source_control
.IsInProperBranch() and
3297 not opts
.debug_ignore_sync
and
3298 not opts
.working_directory
):
3299 raise RuntimeError("You must switch to master branch to run bisection.")
3301 bisect_test
= BisectPerformanceMetrics(source_control
, opts
)
3303 bisect_results
= bisect_test
.Run(opts
.command
,
3307 if bisect_results
['error']:
3308 raise RuntimeError(bisect_results
['error'])
3309 bisect_test
.FormatAndPrintResults(bisect_results
)
3312 bisect_test
.PerformCleanup()
3313 except RuntimeError, e
:
3314 if opts
.output_buildbot_annotations
:
3315 # The perf dashboard scrapes the "results" step in order to comment on
3316 # bugs. If you change this, please update the perf dashboard as well.
3317 bisect_utils
.OutputAnnotationStepStart('Results')
3318 print 'Error: %s' % e
.message
3319 if opts
.output_buildbot_annotations
:
3320 bisect_utils
.OutputAnnotationStepClosed()
3323 if __name__
== '__main__':