Correctly initialize PepperPluginInstanceImpl::layer_is_hardware_
[chromium-blink-merge.git] / tools / bisect-perf-regression.py
blob2db8c8d66e068575c059bf467fca071855385a86
1 #!/usr/bin/env python
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
13 further bisect changes to those depots and attempt to narrow down the revision
14 range.
17 An example usage (using svn cl's):
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
28 An example usage (using git hashes):
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
36 """
38 import copy
39 import datetime
40 import errno
41 import hashlib
42 import math
43 import optparse
44 import os
45 import re
46 import shlex
47 import shutil
48 import StringIO
49 import subprocess
50 import sys
51 import time
52 import zipfile
54 sys.path.append(os.path.join(os.path.dirname(__file__), 'telemetry'))
56 import bisect_utils
57 import post_perf_builder_job
58 from telemetry.page import cloud_storage
# The additional repositories that might need to be bisected.
# If the repository has any dependent repositories (such as skia/src needs
# skia/include and skia/gyp to be updated), specify them in the 'depends'
# so that they're synced appropriately.
# Format is:
# src: path to the working directory.
# recurse: True if this repository will get bisected.
# depends: A list of other repositories that are actually part of the same
#   repository in svn.
# svn: Needed for git workflow to resolve hashes to svn revisions.
# from: Parent depot that must be bisected before this is bisected.
# deps_var: Key name in vars variable in DEPS file that has revision
#   information.
# NOTE: the entry separators ('},') below were lost in a previous
# copy/extraction; this restores a syntactically valid dict literal with the
# same entries and values.
DEPOT_DEPS_NAME = {
  'chromium' : {
    "src" : "src",
    "recurse" : True,
    "depends" : None,
    "from" : ['cros', 'android-chrome'],
    'viewvc': 'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
    'deps_var': 'chromium_rev'
  },
  'webkit' : {
    "src" : "src/third_party/WebKit",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'http://src.chromium.org/viewvc/blink?view=revision&revision=',
    'deps_var': 'webkit_revision'
  },
  'angle' : {
    "src" : "src/third_party/angle",
    "src_old" : "src/third_party/angle_dx11",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    "platform": 'nt',
    'deps_var': 'angle_revision'
  },
  'v8' : {
    "src" : "src/v8",
    "recurse" : True,
    "depends" : None,
    "from" : ['chromium'],
    "custom_deps": bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'v8_bleeding_edge' : {
    "src" : "src/v8_bleeding_edge",
    "recurse" : True,
    "depends" : None,
    "svn": "https://v8.googlecode.com/svn/branches/bleeding_edge",
    "from" : ['v8'],
    'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
    'deps_var': 'v8_revision'
  },
  'skia/src' : {
    "src" : "src/third_party/skia/src",
    "recurse" : True,
    "svn" : "http://skia.googlecode.com/svn/trunk/src",
    "depends" : ['skia/include', 'skia/gyp'],
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'skia_revision'
  },
  'skia/include' : {
    "src" : "src/third_party/skia/include",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/include",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  },
  'skia/gyp' : {
    "src" : "src/third_party/skia/gyp",
    "recurse" : False,
    "svn" : "http://skia.googlecode.com/svn/trunk/gyp",
    "depends" : None,
    "from" : ['chromium'],
    'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
    'deps_var': 'None'
  },
}

DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
# Path to the cros_sdk wrapper, relative to the chromium 'src' directory.
CROS_SDK_PATH = os.path.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
# Pattern with a %s placeholder, presumably matched against cros tool output
# to find the new version number — confirm against the call sites.
CROS_VERSION_PATTERN = 'new version number from %s'
# Portage package name for the ChromeOS Chrome ebuild.
CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
# SSH test keys used to talk to a ChromeOS device, relative to 'src'.
CROS_TEST_KEY_PATH = os.path.join('..', 'cros', 'chromite', 'ssh_keys',
                                  'testing_rsa')
CROS_SCRIPT_KEY_PATH = os.path.join('..', 'cros', 'src', 'scripts',
                                    'mod_for_test_scripts', 'ssh_keys',
                                    'testing_rsa')

# Result codes for a build/sync attempt.
BUILD_RESULT_SUCCEED = 0
BUILD_RESULT_FAIL = 1
BUILD_RESULT_SKIPPED = 2

# Maximum time in seconds to wait after posting build request to tryserver.
# TODO: Change these values based on the actual time taken by buildbots on
# the tryserver.
MAX_MAC_BUILD_TIME = 14400
MAX_WIN_BUILD_TIME = 14400
MAX_LINUX_BUILD_TIME = 14400
# Patch template to add a new file, DEPS.sha under src folder.
# This file contains SHA1 value of the DEPS changes made while bisecting
# dependency repositories. This patch is sent along with the DEPS patch to
# the tryserver. When a build request is posted with a patch, bisect builders
# on the tryserver read the SHA value from this file once the build is
# produced and append it to the build archive filename.
# (The closing triple-quote below restores the string terminator lost in a
# previous copy/extraction.)
DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha
new file mode 100644
--- /dev/null
+++ src/DEPS.sha
@@ -0,0 +1 @@
+%(deps_sha)s
"""

# The possible values of the --bisect_mode flag, which determines what to
# use when classifying a revision as "good" or "bad".
BISECT_MODE_MEAN = 'mean'
BISECT_MODE_STD_DEV = 'std_dev'
BISECT_MODE_RETURN_CODE = 'return_code'
def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables.

  Args:
    depot_info: Dict of depot-name -> config entries, in the same format as
        DEPOT_DEPS_NAME. Entries with clashing keys override existing ones.
  """
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  # dict(a.items() + b.items()) only works in Python 2 (dict views can't be
  # concatenated in Python 3); this merge form is equivalent and portable.
  DEPOT_DEPS_NAME = dict(DEPOT_DEPS_NAME, **depot_info)
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
def CalculateTruncatedMean(data_set, truncate_percent):
  """Calculates the truncated mean of a set of values.

  Note that this isn't just the mean of the set of values with the highest
  and lowest values discarded; the non-discarded values are also weighted
  differently depending how many values are discarded.

  Args:
    data_set: Non-empty list of values.
    truncate_percent: The % from the upper and lower portions of the data set
        to discard, expressed as a value in [0, 1].

  Returns:
    The truncated mean as a float.

  Raises:
    TypeError: The data set was empty after discarding values.
  """
  if len(data_set) > 2:
    data_set = sorted(data_set)

    discard_num_float = len(data_set) * truncate_percent
    discard_num_int = int(math.floor(discard_num_float))
    kept_weight = len(data_set) - discard_num_float * 2

    data_set = data_set[discard_num_int:len(data_set) - discard_num_int]

    weight_left = 1.0 - (discard_num_float - discard_num_int)

    if weight_left < 1:
      # If the % to discard leaves a fractional portion, the two boundary
      # values only count for the fractional remainder of their weight.
      unweighted_vals = data_set[1:len(data_set) - 1]
      weighted_vals = [data_set[0], data_set[-1]]
      weighted_vals = [w * weight_left for w in weighted_vals]
      data_set = weighted_vals + unweighted_vals
  else:
    kept_weight = len(data_set)

  if not data_set:
    # The bare reduce() this replaces raised TypeError on an empty sequence;
    # keep the documented contract with an explicit raise.
    raise TypeError('The data set was empty after discarding values.')

  # sum() over explicit floats replaces the Python-2-only bare reduce().
  return sum(float(x) for x in data_set) / kept_weight
def CalculateMean(values):
  """Returns the arithmetic mean of |values|.

  Implemented as a truncated mean that discards nothing (0% truncation).
  """
  return CalculateTruncatedMean(values, 0.0)
def CalculateConfidence(good_results_lists, bad_results_lists):
  """Calculates a confidence percentage.

  This is calculated based on how distinct the "good" and "bad" values are,
  and how noisy the results are. More precisely, the confidence is the
  quotient of the difference between the closest values across the good and
  bad groups and the sum of the standard deviations of the good and bad
  groups.

  TODO(qyearsley): Replace this confidence function with a function that
  uses a Student's t-test. The confidence would be (1 - p-value), where
  p-value is the probability of obtaining the given a set of good and bad
  values just by chance.

  Args:
    good_results_lists: A list of lists of "good" result numbers.
    bad_results_lists: A list of lists of "bad" result numbers.

  Returns:
    An integer in the range [0, 100].
  """
  # Distance between the closest per-list means of the two groups.
  good_means = [CalculateMean(results) for results in good_results_lists]
  bad_means = [CalculateMean(results) for results in bad_results_lists]
  dist_between_groups = min(
      math.fabs(max(bad_means) - min(good_means)),
      math.fabs(min(bad_means) - max(good_means)))

  # Sum of the standard deviations of the two groups, each flattened into a
  # single sample list.
  stddev_sum = (
      CalculateStandardDeviation(sum(good_results_lists, [])) +
      CalculateStandardDeviation(sum(bad_results_lists, [])))

  # Guard against division by a (near-)zero noise estimate, then clamp the
  # ratio into [0, 1] and express it as a percentage.
  ratio = dist_between_groups / max(0.0001, stddev_sum)
  return int(min(1.0, max(ratio, 0.0)) * 100.0)
def CalculateStandardDeviation(values):
  """Returns the sample standard deviation of the given list of values."""
  if len(values) == 1:
    # A single sample has no spread by definition.
    return 0.0

  mean = CalculateMean(values)
  # Sample variance: squared deviations divided by (n - 1).
  variance = sum((float(v) - mean) ** 2 for v in values) / (len(values) - 1)
  return math.sqrt(variance)
def CalculateRelativeChange(before, after):
  """Returns the relative change of before and after, relative to before.

  There are several different ways to define relative difference between
  two numbers; sometimes it is defined as relative to the smaller number,
  or to the mean of the two numbers. This version returns the difference
  relative to the first of the two numbers.

  Args:
    before: A number representing an earlier value.
    after: Another number, representing a later value.

  Returns:
    A non-negative floating point number; 0.1 represents a 10% change.
  """
  # Identical values: zero change, even when both are zero.
  if before == after:
    return 0.0
  # A change away from zero has no meaningful relative magnitude.
  if before == 0:
    return float('nan')
  difference = after - before
  return math.fabs(difference / before)
def CalculatePooledStandardError(work_sets):
  """Returns the pooled standard error over several lists of samples."""
  sum_weighted_variance = 0.0
  total_degrees_of_freedom = 0.0
  sum_inverse_sizes = 0.0

  for samples in work_sets:
    sample_std_dev = CalculateStandardDeviation(samples)
    sum_weighted_variance += (len(samples) - 1) * sample_std_dev ** 2
    total_degrees_of_freedom += len(samples) - 1
    sum_inverse_sizes += 1.0 / len(samples)

  if not total_degrees_of_freedom:
    # Every set had at most one sample: no spread information available.
    return 0.0
  return (math.sqrt(sum_weighted_variance / total_degrees_of_freedom) *
          math.sqrt(sum_inverse_sizes))
def CalculateStandardError(values):
  """Calculates the standard error of a list of values."""
  if len(values) <= 1:
    return 0.0
  # Standard error of the mean: sample standard deviation over sqrt(n).
  return CalculateStandardDeviation(values) / math.sqrt(len(values))
def IsStringFloat(string_to_check):
  """Checks whether the given string can be converted to a floating point
  number.

  Args:
    string_to_check: Input string to check if it can be converted to a float.

  Returns:
    True if the string can be converted to a float.
  """
  try:
    float(string_to_check)
  except ValueError:
    return False
  return True
def IsStringInt(string_to_check):
  """Checks whether the given string can be converted to an integer.

  Args:
    string_to_check: Input string to check if it can be converted to an int.

  Returns:
    True if the string can be converted to an int.
  """
  try:
    int(string_to_check)
  except ValueError:
    return False
  return True
def IsWindows():
  """Checks whether the script is running on Windows (native or cygwin).

  Returns:
    True if running on Windows.
  """
  return sys.platform.startswith('win') or sys.platform == 'cygwin'
def Is64BitWindows():
  """Returns whether the Windows host OS is 64-bit.

  Checks PROCESSOR_ARCHITEW6432 first, which is only set for 32-bit
  processes running under WoW64 and then holds the real architecture;
  otherwise falls back to PROCESSOR_ARCHITECTURE. Unlike the previous
  version, a missing PROCESSOR_ARCHITECTURE (e.g. on non-Windows hosts)
  no longer raises KeyError — the function simply returns False.

  Returns:
    True if Windows is 64-bit, False if 32-bit.
  """
  platform = os.environ.get('PROCESSOR_ARCHITEW6432',
                            os.environ.get('PROCESSOR_ARCHITECTURE'))
  # NOTE(review): 'I64' is kept from the original code; Itanium actually
  # reports 'IA64' — confirm whether that value was ever intended.
  return platform in ['AMD64', 'I64']
def IsLinux():
  """Checks whether the script is running on Linux.

  Returns:
    True if running on Linux.
  """
  # Matches 'linux', 'linux2', 'linux3', ...
  return sys.platform.startswith('linux')
def IsMac():
  """Checks whether the script is running on Mac.

  Returns:
    True if running on Mac.
  """
  return sys.platform.startswith('darwin')
def GetSHA1HexDigest(contents):
  """Returns the SHA1 hex digest of the given contents."""
  digest = hashlib.sha1()
  digest.update(contents)
  return digest.hexdigest()
def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None):
  """Gets the archive file name for the given revision.

  Args:
    build_revision: Revision to embed in the name; when falsy, only the
        platform base name is returned.
    target_arch: Target architecture string, e.g. 'ia32' or 'x64'.
    patch_sha: Optional SHA of an applied patch, appended to the revision.

  Returns:
    The archive base name, or the full '<base>_<revision>[_<sha>].zip' name.
  """
  def PlatformName():
    """Return a string to be used in paths for the platform."""
    if IsWindows():
      # Build archive for x64 is still stored with 'win32' suffix
      # (chromium_utils.PlatformName()).
      if Is64BitWindows() and target_arch == 'x64':
        return 'win32'
      return 'win32'
    if IsLinux():
      return 'linux'
    if IsMac():
      return 'mac'
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)

  base_name = 'full-build-%s' % PlatformName()
  if not build_revision:
    return base_name
  revision_part = build_revision
  if patch_sha:
    revision_part = '%s_%s' % (revision_part, patch_sha)
  return '%s_%s.zip' % (base_name, revision_part)
def GetRemoteBuildPath(build_revision, target_arch='ia32', patch_sha=None):
  """Computes the url to download the build from.

  Args:
    build_revision: Revision the build archive was produced at.
    target_arch: Target architecture string, e.g. 'ia32' or 'x64'.
    patch_sha: Optional SHA of an applied patch.

  Returns:
    '<builder folder>/<archive file name>' for Google Cloud Storage.
  """
  def GetGSRootFolderName():
    """Gets Google Cloud Storage root folder names"""
    if IsWindows():
      if Is64BitWindows() and target_arch == 'x64':
        return 'Win x64 Builder'
      return 'Win Builder'
    if IsLinux():
      return 'Linux Builder'
    if IsMac():
      return 'Mac Builder'
    raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

  remote_filename = GetZipFileName(build_revision, target_arch, patch_sha)
  return '%s/%s' % (GetGSRootFolderName(), remote_filename)
def FetchFromCloudStorage(bucket_name, source_path, destination_path):
  """Fetches file(s) from the Google Cloud Storage.

  Args:
    bucket_name: Google Storage bucket name.
    source_path: Source file path.
    destination_path: Destination file path (a directory).

  Returns:
    True if the fetching succeeds, otherwise False.
  """
  target_file = os.path.join(destination_path, os.path.basename(source_path))
  try:
    if cloud_storage.Exists(bucket_name, source_path):
      # NOTE(review): message reads 'gs//' — looks like a missing ':'.
      print 'Fetching file from gs//%s/%s ...' % (bucket_name, source_path)
      cloud_storage.Get(bucket_name, source_path, destination_path)
      # Get() is not relied on to signal errors; the file's presence on disk
      # is treated as the success signal.
      if os.path.exists(target_file):
        return True
    else:
      print ('File gs://%s/%s not found in cloud storage.' % (
          bucket_name, source_path))
  except Exception as e:
    # Best-effort fetch: report any cloud-storage error and treat it as a
    # miss. Remove a partially-downloaded file so callers don't pick it up.
    print 'Something went wrong while fetching file from cloud: %s' % e
    if os.path.exists(target_file):
      os.remove(target_file)
  return False
# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
  """Creates an entire path, if it doesn't already exist.

  Args:
    *path: Path components, joined with os.path.join.

  Returns:
    True if the directory exists or was created; False on any other OS error.
  """
  file_path = os.path.join(*path)
  try:
    os.makedirs(file_path)
  except OSError as e:
    # 'except OSError as e' replaces the Python-2-only 'except OSError, e'
    # form, matching the handler style used elsewhere in this file.
    if e.errno != errno.EEXIST:
      return False
  return True
# This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def ExtractZip(filename, output_dir, verbose=True):
  """ Extract the zip archive in the output directory."""
  MaybeMakeDirectory(output_dir)

  # On Linux and Mac, we use the unzip command as it will
  # handle links and file bits (executable), which is much
  # easier then trying to do that with ZipInfo options.
  #
  # The Mac Version of unzip unfortunately does not support Zip64, whereas
  # the python module does, so we have to fallback to the python zip module
  # on Mac if the filesize is greater than 4GB.
  #
  # On Windows, try to use 7z if it is installed, otherwise fall back to python
  # zip module and pray we don't have files larger than 512MB to unzip.
  unzip_cmd = None
  if ((IsMac() and os.path.getsize(filename) < 4 * 1024 * 1024 * 1024)
      or IsLinux()):
    unzip_cmd = ['unzip', '-o']
  elif IsWindows() and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe'):
    unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']

  if unzip_cmd:
    # Make sure path is absolute before changing directories.
    filepath = os.path.abspath(filename)
    saved_dir = os.getcwd()
    os.chdir(output_dir)
    command = unzip_cmd + [filepath]
    # Non-zero exit status from the external tool means extraction failed.
    result = RunProcess(command)
    os.chdir(saved_dir)
    if result:
      raise IOError('unzip failed: %s => %s' % (str(command), result))
  else:
    # Fall back to the (slower) pure-python zipfile module.
    assert IsWindows() or IsMac()
    zf = zipfile.ZipFile(filename)
    for name in zf.namelist():
      if verbose:
        print 'Extracting %s' % name
      zf.extract(name, output_dir)
      if IsMac():
        # Restore permission bits, stored in the upper 16 bits of
        # external_attr (zipfile does not restore them itself).
        os.chmod(os.path.join(output_dir, name),
                 zf.getinfo(name).external_attr >> 16L)
def RunProcess(command):
  """Runs an arbitrary command and waits for it to finish.

  If output from the call is needed, use RunProcessAndRetrieveOutput instead.

  Args:
    command: A list containing the command and args to execute.

  Returns:
    The return code of the call.
  """
  # shell=True is needed on Windows so the executable is looked up via PATH.
  return subprocess.call(command, shell=IsWindows())
def RunProcessAndRetrieveOutput(command, cwd=None):
  """Runs an arbitrary command, returning its output and return code.

  Since output is collected via communicate(), there will be no output until
  the call terminates. If you need output while the program runs (ie. so
  that the buildbot doesn't terminate the script), consider RunProcess().

  Args:
    command: A list containing the command and args to execute.
    cwd: A directory to change to while running the command. The command can be
        relative to this directory. If this is None, the command will be run in
        the current directory.

  Returns:
    A tuple of the output and return code.
  """
  if cwd:
    original_cwd = os.getcwd()
    os.chdir(cwd)

  try:
    # On Windows, use shell=True to get PATH interpretation.
    shell = IsWindows()
    proc = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE)
    (output, _) = proc.communicate()
  finally:
    # Restore the working directory even if Popen/communicate raises;
    # previously an exception here left the process in |cwd|.
    if cwd:
      os.chdir(original_cwd)

  return (output, proc.returncode)
def RunGit(command, cwd=None):
  """Runs a git subcommand, returning its output and return code.

  Args:
    command: A list containing the args to git.
    cwd: A directory to change to while running the git command (optional).

  Returns:
    A tuple of the output and return code.
  """
  git_command = ['git'] + command
  return RunProcessAndRetrieveOutput(git_command, cwd=cwd)
def CheckRunGit(command, cwd=None):
  """Runs a git subcommand, asserting that it succeeded.

  Args:
    command: A list containing the args to git.
    cwd: A directory to change to while running the git command (optional).

  Returns:
    The output of the git command.
  """
  output, return_code = RunGit(command, cwd=cwd)
  assert not return_code, 'An error occurred while running'\
      ' "git %s"' % ' '.join(command)
  return output
def SetBuildSystemDefault(build_system):
  """Sets up any environment variables needed to build with the specified
  build system.

  Args:
    build_system: A string specifying build system. Currently only 'ninja' or
        'make' are supported.

  Raises:
    RuntimeError: The given build system is not supported.
  """
  if build_system == 'ninja':
    gyp_var = os.getenv('GYP_GENERATORS')

    # 'not in' replaces the non-idiomatic 'not ... in ...' form.
    if not gyp_var or 'ninja' not in gyp_var:
      if gyp_var:
        os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
      else:
        os.environ['GYP_GENERATORS'] = 'ninja'

      if IsWindows():
        os.environ['GYP_DEFINES'] = 'component=shared_library '\
            'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
            'chromium_win_pch=0'
  elif build_system == 'make':
    os.environ['GYP_GENERATORS'] = 'make'
  else:
    raise RuntimeError('%s build not supported.' % build_system)
def BuildWithMake(threads, targets, build_type='Release'):
  """Runs a make build of |targets|; returns True on success."""
  cmd = ['make', 'BUILDTYPE=%s' % build_type]
  if threads:
    cmd.append('-j%d' % threads)
  cmd.extend(targets)
  return RunProcess(cmd) == 0
def BuildWithNinja(threads, targets, build_type='Release'):
  """Runs a ninja build of |targets| in out/<build_type>; True on success."""
  cmd = ['ninja', '-C', os.path.join('out', build_type)]
  if threads:
    cmd.append('-j%d' % threads)
  cmd.extend(targets)
  return RunProcess(cmd) == 0
def BuildWithVisualStudio(targets, build_type='Release'):
  """Builds |targets| of chrome.sln with devenv.com; True on success."""
  path_to_devenv = os.path.abspath(
      os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
  path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
  cmd = [path_to_devenv, '/build', build_type, path_to_sln]
  for target in targets:
    cmd.extend(['/Project', target])
  return RunProcess(cmd) == 0
def WriteStringToFile(text, file_name):
  """Writes |text| to |file_name| in binary mode.

  Args:
    text: The content to write.
    file_name: Path of the file to (over)write.

  Raises:
    RuntimeError: The file could not be written.
  """
  try:
    with open(file_name, "wb") as f:
      f.write(text)
  except IOError as e:
    # Include the underlying OS error instead of silently discarding it.
    raise RuntimeError('Error writing to file [%s]: %s' % (file_name, e))
def ReadStringFromFile(file_name):
  """Returns the entire content of |file_name|.

  Args:
    file_name: Path of the file to read.

  Raises:
    RuntimeError: The file could not be read.
  """
  try:
    with open(file_name) as f:
      return f.read()
  except IOError as e:
    # Include the underlying OS error instead of silently discarding it.
    raise RuntimeError('Error reading file [%s]: %s' % (file_name, e))
def ChangeBackslashToSlashInPatch(diff_text):
  """Formats file paths in the given patch text to unix-style paths.

  Only '--- ' and '+++ ' header lines are rewritten; diff body lines are
  left untouched.

  Returns:
    The converted text, or None when |diff_text| is empty.
  """
  if not diff_text:
    return None

  def _FixLine(line):
    # Only the old/new file header lines carry paths.
    if line.startswith(('--- ', '+++ ')):
      return line.replace('\\', '/')
    return line

  return '\n'.join(_FixLine(line) for line in diff_text.split('\n'))
class Builder(object):
  """Builder is used by the bisect script to build relevant targets and
  deploy. Use FromOpts() to obtain the concrete subclass for the target
  platform."""

  def __init__(self, opts):
    """Performs setup for building with target build system.

    Args:
      opts: Options parsed from command line.

    Raises:
      RuntimeError: The build environment could not be set up.
    """
    if IsWindows():
      if not opts.build_preference:
        opts.build_preference = 'msvs'

      if opts.build_preference == 'msvs':
        if not os.getenv('VS100COMNTOOLS'):
          raise RuntimeError(
              'Path to visual studio could not be determined.')
      else:
        SetBuildSystemDefault(opts.build_preference)
    else:
      if not opts.build_preference:
        # NOTE(review): os.getenv may return None here, which would make the
        # 'in' test raise TypeError — presumably GYP_GENERATORS is always
        # set on non-Windows hosts; confirm.
        if 'ninja' in os.getenv('GYP_GENERATORS'):
          opts.build_preference = 'ninja'
        else:
          opts.build_preference = 'make'

      SetBuildSystemDefault(opts.build_preference)

    if not bisect_utils.SetupPlatformBuildEnvironment(opts):
      raise RuntimeError('Failed to set platform environment.')

  @staticmethod
  def FromOpts(opts):
    """Returns the Builder subclass matching opts.target_platform."""
    builder = None
    if opts.target_platform == 'cros':
      builder = CrosBuilder(opts)
    elif opts.target_platform == 'android':
      builder = AndroidBuilder(opts)
    elif opts.target_platform == 'android-chrome':
      builder = AndroidChromeBuilder(opts)
    else:
      builder = DesktopBuilder(opts)
    return builder

  def Build(self, depot, opts):
    # Subclass responsibility: build the targets for |depot| using |opts|.
    raise NotImplementedError()

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    # Subclass responsibility: path to the build output directory.
    raise NotImplementedError()
class DesktopBuilder(Builder):
  """Builds Chromium on linux/mac/windows."""

  def __init__(self, opts):
    super(DesktopBuilder, self).__init__(opts)

  def Build(self, depot, opts):
    """Builds the chromium_builder_perf target using options passed into
    the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    build_targets = ['chromium_builder_perf']
    # goma distributes compilation, so far more jobs can run in parallel.
    num_threads = 64 if opts.use_goma else None

    if opts.build_preference == 'make':
      return BuildWithMake(num_threads, build_targets, opts.target_build_type)
    if opts.build_preference == 'ninja':
      return BuildWithNinja(num_threads, build_targets, opts.target_build_type)
    if opts.build_preference == 'msvs':
      assert IsWindows(), 'msvs is only supported on Windows.'
      return BuildWithVisualStudio(build_targets, opts.target_build_type)
    assert False, 'No build system defined.'
    return False  # Unreachable unless asserts are disabled (python -O).

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    """Returns the path to the build directory, relative to the checkout root.

    Assumes that the current working directory is the checkout root.
    """
    src_dir = src_dir or 'src'
    if opts.build_preference == 'ninja' or IsLinux():
      return os.path.join(src_dir, 'out')
    if IsMac():
      return os.path.join(src_dir, 'xcodebuild')
    if IsWindows():
      return os.path.join(src_dir, 'build')
    raise NotImplementedError('Unexpected platform %s' % sys.platform)
class AndroidBuilder(Builder):
  """Builds on android."""

  def __init__(self, opts):
    super(AndroidBuilder, self).__init__(opts)

  def _GetTargets(self):
    # Targets needed to build and drive perf tests on an Android device.
    return ['chrome_shell_apk', 'cc_perftests_apk', 'android_tools']

  def Build(self, depot, opts):
    """Builds the android content shell and other necessary tools using
    options passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    # goma distributes compilation, so far more jobs can run in parallel.
    num_threads = 64 if opts.use_goma else None

    if opts.build_preference == 'ninja':
      return BuildWithNinja(
          num_threads, self._GetTargets(), opts.target_build_type)
    assert False, 'No build system defined.'
    return False  # Unreachable unless asserts are disabled (python -O).
class AndroidChromeBuilder(AndroidBuilder):
  """Builds on android's chrome."""

  def __init__(self, opts):
    super(AndroidChromeBuilder, self).__init__(opts)

  def _GetTargets(self):
    # android-chrome additionally needs the chrome_apk target.
    targets = AndroidBuilder._GetTargets(self)
    return targets + ['chrome_apk']
class CrosBuilder(Builder):
  """CrosBuilder is used to build and image ChromeOS/Chromium when cros is
  the target platform."""

  def __init__(self, opts):
    super(CrosBuilder, self).__init__(opts)

  def ImageToTarget(self, opts):
    """Installs latest image to target specified by opts.cros_remote_ip.

    Args:
      opts: Program options containing cros_board and cros_remote_ip.

    Returns:
      True if successful.
    """
    try:
      # Keys will most likely be set to 0640 after wiping the chroot.
      os.chmod(CROS_SCRIPT_KEY_PATH, 0600)
      os.chmod(CROS_TEST_KEY_PATH, 0600)
      cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
             '--remote=%s' % opts.cros_remote_ip,
             '--board=%s' % opts.cros_board, '--test', '--verbose']

      return_code = RunProcess(cmd)
      return not return_code
    except OSError, e:
      # chmod or process launch failed; report imaging as unsuccessful.
      return False

  def BuildPackages(self, opts, depot):
    """Builds packages for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    # When bisecting chrome (not cros itself), point the SDK at the local
    # chrome checkout and build from local source.
    if depot != 'cros':
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=%s' % opts.target_build_type, './build_packages',
        '--board=%s' % opts.cros_board]
    return_code = RunProcess(cmd)

    return not return_code

  def BuildImage(self, opts, depot):
    """Builds test image for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    if depot != 'cros':
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=%s' % opts.target_build_type, '--', './build_image',
        '--board=%s' % opts.cros_board, 'test']

    return_code = RunProcess(cmd)

    return not return_code

  def Build(self, depot, opts):
    """Builds targets using options passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    # Full pipeline: build packages, then the test image, then flash the
    # device. Each step gates the next; any failure reports False.
    if self.BuildPackages(opts, depot):
      if self.BuildImage(opts, depot):
        return self.ImageToTarget(opts)
    return False
class SourceControl(object):
  """Abstraction over the underlying source control system used for
  chromium. For now only git is supported, but in the future the svn
  workflow could be added as well."""

  def __init__(self):
    super(SourceControl, self).__init__()

  def SyncToRevisionWithGClient(self, revision):
    """Uses gclient to sync to the specified revision.

    ie. gclient sync --revision <revision>

    Args:
      revision: The git SHA1 or svn CL (depending on workflow).

    Returns:
      The return code of the call.
    """
    gclient_args = ['sync', '--verbose', '--reset', '--force',
                    '--delete_unversioned_trees', '--nohooks',
                    '--revision', revision]
    return bisect_utils.RunGClient(gclient_args)

  def SyncToRevisionWithRepo(self, timestamp):
    """Uses repo to sync all the underlying git depots to the specified
    time.

    Args:
      timestamp: The unix timestamp to sync to.

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunRepoSyncAtTimestamp(timestamp)
class GitSourceControl(SourceControl):
  """GitSourceControl is used to query the underlying source control. """

  def __init__(self, opts):
    super(GitSourceControl, self).__init__()
    # Parsed command-line options, kept for use by the query methods.
    self.opts = opts

  def IsGit(self):
    # This implementation always operates on a git checkout.
    return True
1018 def GetRevisionList(self, revision_range_end, revision_range_start, cwd=None):
1019 """Retrieves a list of revisions between |revision_range_start| and
1020 |revision_range_end|.
1022 Args:
1023 revision_range_end: The SHA1 for the end of the range.
1024 revision_range_start: The SHA1 for the beginning of the range.
1026 Returns:
1027 A list of the revisions between |revision_range_start| and
1028 |revision_range_end| (inclusive).
1030 revision_range = '%s..%s' % (revision_range_start, revision_range_end)
1031 cmd = ['log', '--format=%H', '-10000', '--first-parent', revision_range]
1032 log_output = CheckRunGit(cmd, cwd=cwd)
1034 revision_hash_list = log_output.split()
1035 revision_hash_list.append(revision_range_start)
1037 return revision_hash_list
  def SyncToRevision(self, revision, sync_client=None):
    """Syncs to the specified revision.

    Args:
      revision: The revision to sync to.
      sync_client: None to check out directly with git, or 'gclient'/'repo'
          to sync with that tool instead. (The previous docstring named this
          'use_gclient'.) NOTE(review): any other value leaves |results|
          unbound and raises NameError below.

    Returns:
      True if successful.
    """

    if not sync_client:
      results = RunGit(['checkout', revision])[1]
    elif sync_client == 'gclient':
      results = self.SyncToRevisionWithGClient(revision)
    elif sync_client == 'repo':
      results = self.SyncToRevisionWithRepo(revision)

    return not results
1060 def ResolveToRevision(self, revision_to_check, depot, search, cwd=None):
1061 """If an SVN revision is supplied, try to resolve it to a git SHA1.
1063 Args:
1064 revision_to_check: The user supplied revision string that may need to be
1065 resolved to a git SHA1.
1066 depot: The depot the revision_to_check is from.
1067 search: The number of changelists to try if the first fails to resolve
1068 to a git hash. If the value is negative, the function will search
1069 backwards chronologically, otherwise it will search forward.
1071 Returns:
1072 A string containing a git SHA1 hash, otherwise None.
1074 # Android-chrome is git only, so no need to resolve this to anything else.
1075 if depot == 'android-chrome':
1076 return revision_to_check
1078 if depot != 'cros':
1079 if not IsStringInt(revision_to_check):
1080 return revision_to_check
1082 depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'
1084 if depot != 'chromium':
1085 depot_svn = DEPOT_DEPS_NAME[depot]['svn']
1087 svn_revision = int(revision_to_check)
1088 git_revision = None
1090 if search > 0:
1091 search_range = xrange(svn_revision, svn_revision + search, 1)
1092 else:
1093 search_range = xrange(svn_revision, svn_revision + search, -1)
1095 for i in search_range:
1096 svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
1097 cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
1098 'origin/master']
1100 (log_output, return_code) = RunGit(cmd, cwd=cwd)
1102 assert not return_code, 'An error occurred while running'\
1103 ' "git %s"' % ' '.join(cmd)
1105 if not return_code:
1106 log_output = log_output.strip()
1108 if log_output:
1109 git_revision = log_output
1111 break
1113 return git_revision
1114 else:
1115 if IsStringInt(revision_to_check):
1116 return int(revision_to_check)
1117 else:
1118 cwd = os.getcwd()
1119 os.chdir(os.path.join(os.getcwd(), 'src', 'third_party',
1120 'chromiumos-overlay'))
1121 pattern = CROS_VERSION_PATTERN % revision_to_check
1122 cmd = ['log', '--format=%ct', '-1', '--grep', pattern]
1124 git_revision = None
1126 log_output = CheckRunGit(cmd, cwd=cwd)
1127 if log_output:
1128 git_revision = log_output
1129 git_revision = int(log_output.strip())
1130 os.chdir(cwd)
1132 return git_revision
1134 def IsInProperBranch(self):
1135 """Confirms they're in the master branch for performing the bisection.
1136 This is needed or gclient will fail to sync properly.
1138 Returns:
1139 True if the current branch on src is 'master'
1141 cmd = ['rev-parse', '--abbrev-ref', 'HEAD']
1142 log_output = CheckRunGit(cmd)
1143 log_output = log_output.strip()
1145 return log_output == "master"
1147 def SVNFindRev(self, revision, cwd=None):
1148 """Maps directly to the 'git svn find-rev' command.
1150 Args:
1151 revision: The git SHA1 to use.
1153 Returns:
1154 An integer changelist #, otherwise None.
1157 cmd = ['svn', 'find-rev', revision]
1159 output = CheckRunGit(cmd, cwd)
1160 svn_revision = output.strip()
1162 if IsStringInt(svn_revision):
1163 return int(svn_revision)
1165 return None
1167 def QueryRevisionInfo(self, revision, cwd=None):
1168 """Gathers information on a particular revision, such as author's name,
1169 email, subject, and date.
1171 Args:
1172 revision: Revision you want to gather information on.
1173 Returns:
1174 A dict in the following format:
1176 'author': %s,
1177 'email': %s,
1178 'date': %s,
1179 'subject': %s,
1180 'body': %s,
1183 commit_info = {}
1185 formats = ['%cN', '%cE', '%s', '%cD', '%b']
1186 targets = ['author', 'email', 'subject', 'date', 'body']
1188 for i in xrange(len(formats)):
1189 cmd = ['log', '--format=%s' % formats[i], '-1', revision]
1190 output = CheckRunGit(cmd, cwd=cwd)
1191 commit_info[targets[i]] = output.rstrip()
1193 return commit_info
1195 def CheckoutFileAtRevision(self, file_name, revision, cwd=None):
1196 """Performs a checkout on a file at the given revision.
1198 Returns:
1199 True if successful.
1201 return not RunGit(['checkout', revision, file_name], cwd=cwd)[1]
1203 def RevertFileToHead(self, file_name):
1204 """Unstages a file and returns it to HEAD.
1206 Returns:
1207 True if successful.
1209 # Reset doesn't seem to return 0 on success.
1210 RunGit(['reset', 'HEAD', file_name])
1212 return not RunGit(['checkout', bisect_utils.FILE_DEPS_GIT])[1]
1214 def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
1215 """Returns a list of commits that modified this file.
1217 Args:
1218 filename: Name of file.
1219 revision_start: Start of revision range.
1220 revision_end: End of revision range.
1222 Returns:
1223 Returns a list of commits that touched this file.
1225 cmd = ['log', '--format=%H', '%s~1..%s' % (revision_start, revision_end),
1226 filename]
1227 output = CheckRunGit(cmd)
1229 return [o for o in output.split('\n') if o]
1232 class BisectPerformanceMetrics(object):
1233 """This class contains functionality to perform a bisection of a range of
1234 revisions to narrow down where performance regressions may have occurred.
1236 The main entry-point is the Run method.
1239 def __init__(self, source_control, opts):
1240 super(BisectPerformanceMetrics, self).__init__()
1242 self.opts = opts
1243 self.source_control = source_control
1244 self.src_cwd = os.getcwd()
1245 self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
1246 self.depot_cwd = {}
1247 self.cleanup_commands = []
1248 self.warnings = []
1249 self.builder = Builder.FromOpts(opts)
1251 # This always starts true since the script grabs latest first.
1252 self.was_blink = True
1254 for d in DEPOT_NAMES:
1255 # The working directory of each depot is just the path to the depot, but
1256 # since we're already in 'src', we can skip that part.
1258 self.depot_cwd[d] = os.path.join(
1259 self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
1261 def PerformCleanup(self):
1262 """Performs cleanup when script is finished."""
1263 os.chdir(self.src_cwd)
1264 for c in self.cleanup_commands:
1265 if c[0] == 'mv':
1266 shutil.move(c[1], c[2])
1267 else:
1268 assert False, 'Invalid cleanup command.'
  def GetRevisionList(self, depot, bad_revision, good_revision):
    """Retrieves a list of all the commits between the bad revision and
    last known good revision.

    Args:
      depot: The depot being bisected.
      bad_revision: End of the range (known-bad).
      good_revision: Start of the range (known-good).

    Returns:
      For 'cros', a descending list of unique commit timestamps; otherwise
      a list of git hashes from the underlying source control.
    """

    revision_work_list = []

    if depot == 'cros':
      revision_range_start = good_revision
      revision_range_end = bad_revision

      cwd = os.getcwd()
      self.ChangeToDepotWorkingDirectory('cros')

      # Print the commit timestamps for every commit in the revision time
      # range. We'll sort them and bisect by that. There is a remote chance
      # that 2 (or more) commits will share the exact same timestamp, but
      # it's probably safe to ignore that case.
      cmd = ['repo', 'forall', '-c',
          'git log --format=%%ct --before=%d --after=%d' % (
          revision_range_end, revision_range_start)]
      (output, return_code) = RunProcessAndRetrieveOutput(cmd)

      assert not return_code, 'An error occurred while running'\
                              ' "%s"' % ' '.join(cmd)

      os.chdir(cwd)

      # De-duplicate and sort newest-first so bisection can index into it.
      revision_work_list = list(set(
          [int(o) for o in output.split('\n') if IsStringInt(o)]))
      revision_work_list = sorted(revision_work_list, reverse=True)
    else:
      cwd = self._GetDepotDirectory(depot)
      revision_work_list = self.source_control.GetRevisionList(bad_revision,
          good_revision, cwd=cwd)

    return revision_work_list
  def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
    """Maps a V8 trunk revision to its bleeding_edge counterpart, if possible.

    Args:
      revision: A git SHA1 in the V8 trunk repository.

    Returns:
      The corresponding bleeding_edge git SHA1, or None when the trunk commit
      is not a routine version push that can be mapped.
    """
    svn_revision = self.source_control.SVNFindRev(revision)

    if IsStringInt(svn_revision):
      # V8 is tricky to bisect, in that there are only a few instances when
      # we can dive into bleeding_edge and get back a meaningful result.
      # Try to detect a V8 "business as usual" case, which is when:
      #  1. trunk revision N has description "Version X.Y.Z"
      #  2. bleeding_edge revision (N-1) has description "Prepare push to
      #     trunk. Now working on X.Y.(Z+1)."
      #
      # As of 01/24/2014, V8 trunk descriptions are formatted:
      # "Version 3.X.Y (based on bleeding_edge revision rZ)"
      # So we can just try parsing that out first and fall back to the old
      # way.
      v8_dir = self._GetDepotDirectory('v8')
      v8_bleeding_edge_dir = self._GetDepotDirectory('v8_bleeding_edge')

      revision_info = self.source_control.QueryRevisionInfo(revision,
          cwd=v8_dir)

      version_re = re.compile("Version (?P<values>[0-9,.]+)")

      regex_results = version_re.search(revision_info['subject'])

      if regex_results:
        git_revision = None

        # Look for "based on bleeding_edge" and parse out revision
        if 'based on bleeding_edge' in revision_info['subject']:
          try:
            bleeding_edge_revision = revision_info['subject'].split(
                'bleeding_edge revision r')[1]
            bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
            git_revision = self.source_control.ResolveToRevision(
                bleeding_edge_revision, 'v8_bleeding_edge', 1,
                cwd=v8_bleeding_edge_dir)
            return git_revision
          except (IndexError, ValueError):
            # Subject didn't have the expected shape; fall through to the
            # legacy "Prepare push to trunk" heuristic below.
            pass

        if not git_revision:
          # Wasn't successful, try the old way of looking for
          # "Prepare push to".
          git_revision = self.source_control.ResolveToRevision(
              int(svn_revision) - 1, 'v8_bleeding_edge', -1,
              cwd=v8_bleeding_edge_dir)

          if git_revision:
            revision_info = self.source_control.QueryRevisionInfo(git_revision,
                cwd=v8_bleeding_edge_dir)

            if 'Prepare push to trunk' in revision_info['subject']:
              return git_revision
    return None
1361 def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
1362 cwd = self._GetDepotDirectory('v8')
1363 cmd = ['log', '--format=%ct', '-1', revision]
1364 output = CheckRunGit(cmd, cwd=cwd)
1365 commit_time = int(output)
1366 commits = []
1368 if search_forward:
1369 cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
1370 'origin/master']
1371 output = CheckRunGit(cmd, cwd=cwd)
1372 output = output.split()
1373 commits = output
1374 commits = reversed(commits)
1375 else:
1376 cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
1377 'origin/master']
1378 output = CheckRunGit(cmd, cwd=cwd)
1379 output = output.split()
1380 commits = output
1382 bleeding_edge_revision = None
1384 for c in commits:
1385 bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)
1386 if bleeding_edge_revision:
1387 break
1389 return bleeding_edge_revision
1391 def _ParseRevisionsFromDEPSFileManually(self, deps_file_contents):
1392 """Manually parses the vars section of the DEPS file to determine
1393 chromium/blink/etc... revisions.
1395 Returns:
1396 A dict in the format {depot:revision} if successful, otherwise None.
1398 # We'll parse the "vars" section of the DEPS file.
1399 rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE)
1400 re_results = rxp.search(deps_file_contents)
1401 locals = {}
1403 if not re_results:
1404 return None
1406 # We should be left with a series of entries in the vars component of
1407 # the DEPS file with the following format:
1408 # 'depot_name': 'revision',
1409 vars_body = re_results.group('vars_body')
1410 rxp = re.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
1411 re.MULTILINE)
1412 re_results = rxp.findall(vars_body)
1414 return dict(re_results)
  def _ParseRevisionsFromDEPSFile(self, depot):
    """Parses the local DEPS file to determine blink/skia/v8 revisions which
    may be needed if the bisect recurses into those depots later.

    Args:
      depot: Depot being bisected.

    Returns:
      A dict in the format {depot:revision} if successful, otherwise None.
    """
    try:
      # DEPS is executable python; evaluate it with stub Var/From helpers so
      # the 'deps' dict comes back fully expanded.
      deps_data = {'Var': lambda _: deps_data["vars"][_],
                   'From': lambda *args: None
                  }
      execfile(bisect_utils.FILE_DEPS_GIT, {}, deps_data)
      deps_data = deps_data['deps']

      # Each dependency entry ends with ".git@<revision>".
      rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
      results = {}
      for depot_name, depot_data in DEPOT_DEPS_NAME.iteritems():
        # Skip entries that only apply to a different platform.
        if (depot_data.get('platform') and
            depot_data.get('platform') != os.name):
          continue

        if (depot_data.get('recurse') and depot in depot_data.get('from')):
          depot_data_src = depot_data.get('src') or depot_data.get('src_old')
          src_dir = deps_data.get(depot_data_src)
          if src_dir:
            # Cache the working directory for a possible later recursion
            # into this depot (strip the leading 'src/').
            self.depot_cwd[depot_name] = os.path.join(self.src_cwd,
                depot_data_src[4:])
            re_results = rxp.search(src_dir)
            if re_results:
              results[depot_name] = re_results.group('revision')
            else:
              warning_text = ('Couldn\'t parse revision for %s while '
                              'bisecting %s' % (depot_name, depot))
              if not warning_text in self.warnings:
                self.warnings.append(warning_text)
          else:
            results[depot_name] = None
      return results
    except ImportError:
      # Very old DEPS revisions import other modules and can't be exec'd;
      # fall back to regex-parsing the "vars" section manually.
      deps_file_contents = ReadStringFromFile(bisect_utils.FILE_DEPS_GIT)
      parse_results = self._ParseRevisionsFromDEPSFileManually(
          deps_file_contents)
      results = {}
      for depot_name, depot_revision in parse_results.iteritems():
        depot_revision = depot_revision.strip('@')
        print depot_name, depot_revision
        # Map the DEPS var name (e.g. 'webkit_revision') back to the depot
        # name used by this script.
        for current_name, current_data in DEPOT_DEPS_NAME.iteritems():
          if (current_data.has_key('deps_var') and
              current_data['deps_var'] == depot_name):
            src_name = current_name
            results[src_name] = depot_revision
            break
      return results
  def Get3rdPartyRevisionsFromCurrentRevision(self, depot, revision):
    """Parses the DEPS file to determine WebKit/v8/etc... versions.

    Args:
      depot: The depot currently being bisected.
      revision: The currently synced revision (not used for most depots).

    Returns:
      A dict in the format {depot:revision} if successful, otherwise None.
    """
    cwd = os.getcwd()
    self.ChangeToDepotWorkingDirectory(depot)

    results = {}

    if depot == 'chromium' or depot == 'android-chrome':
      results = self._ParseRevisionsFromDEPSFile(depot)
      os.chdir(cwd)
    elif depot == 'cros':
      # Ask portage for the Chrome ebuild bundled in this CrOS build.
      cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board,
             'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild',
             CROS_CHROMEOS_PATTERN]
      (output, return_code) = RunProcessAndRetrieveOutput(cmd)

      assert not return_code, 'An error occurred while running' \
                              ' "%s"' % ' '.join(cmd)

      # NOTE(review): this compares an int against a string pattern;
      # presumably len(CROS_CHROMEOS_PATTERN) was intended — confirm.
      if len(output) > CROS_CHROMEOS_PATTERN:
        output = output[len(CROS_CHROMEOS_PATTERN):]

      if len(output) > 1:
        # Strip the ebuild suffix, e.g. "34.0.1790.0_rc-r1" -> "34.0.1790.0".
        output = output.split('_')[0]

        if len(output) > 3:
          contents = output.split('.')

          # Third component is the Chrome build number.
          version = contents[2]

          if contents[3] != '0':
            warningText = 'Chrome version: %s.%s but using %s.0 to bisect.' % \
                (version, contents[3], version)
            if not warningText in self.warnings:
              self.warnings.append(warningText)

          cwd = os.getcwd()
          self.ChangeToDepotWorkingDirectory('chromium')
          # NOTE(review): the result of this git query is unused (and the
          # name |return_code| is misleading); presumably results['chromium']
          # below was meant to use it — confirm.
          return_code = CheckRunGit(['log', '-1', '--format=%H',
              '--author=chrome-release@google.com', '--grep=to %s' % version,
              'origin/master'])
          os.chdir(cwd)

          results['chromium'] = output.strip()
    elif depot == 'v8':
      # We can't try to map the trunk revision to bleeding edge yet, because
      # we don't know which direction to try to search in. Have to wait until
      # the bisect has narrowed the results down to 2 v8 rolls.
      results['v8_bleeding_edge'] = None

    return results
1529 def BackupOrRestoreOutputdirectory(self, restore=False, build_type='Release'):
1530 """Backs up or restores build output directory based on restore argument.
1532 Args:
1533 restore: Indicates whether to restore or backup. Default is False(Backup)
1534 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1536 Returns:
1537 Path to backup or restored location as string. otherwise None if it fails.
1539 build_dir = os.path.abspath(
1540 self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1541 source_dir = os.path.join(build_dir, build_type)
1542 destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
1543 if restore:
1544 source_dir, destination_dir = destination_dir, source_dir
1545 if os.path.exists(source_dir):
1546 RmTreeAndMkDir(destination_dir, skip_makedir=True)
1547 shutil.move(source_dir, destination_dir)
1548 return destination_dir
1549 return None
1551 def DownloadCurrentBuild(self, revision, build_type='Release', patch=None):
1552 """Downloads the build archive for the given revision.
1554 Args:
1555 revision: The SVN revision to build.
1556 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1558 Returns:
1559 True if download succeeds, otherwise False.
1561 patch_sha = None
1562 if patch:
1563 # Get the SHA of the DEPS changes patch.
1564 patch_sha = GetSHA1HexDigest(patch)
1566 # Update the DEPS changes patch with a patch to create a new file named
1567 # 'DEPS.sha' and add patch_sha evaluated above to it.
1568 patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})
1570 # Source archive file path on cloud storage.
1571 source_file = GetRemoteBuildPath(revision, self.opts.target_arch, patch_sha)
1573 # Get Build output directory
1574 abs_build_dir = os.path.abspath(
1575 self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1576 # Downloaded archive file path.
1577 downloaded_file = os.path.join(
1578 abs_build_dir,
1579 GetZipFileName(revision, self.opts.target_arch, patch_sha))
1581 fetch_build_func = lambda: FetchFromCloudStorage(self.opts.gs_bucket,
1582 source_file,
1583 abs_build_dir)
1585 if not fetch_build_func():
1586 if not self.PostBuildRequestAndWait(revision,
1587 condition=fetch_build_func,
1588 patch=patch):
1589 raise RuntimeError('Somewthing went wrong while processing build'
1590 'request for: %s' % revision)
1591 # Generic name for the archive, created when archive file is extracted.
1592 output_dir = os.path.join(
1593 abs_build_dir, GetZipFileName(target_arch=self.opts.target_arch))
1594 # Unzip build archive directory.
1595 try:
1596 RmTreeAndMkDir(output_dir, skip_makedir=True)
1597 ExtractZip(downloaded_file, abs_build_dir)
1598 if os.path.exists(output_dir):
1599 self.BackupOrRestoreOutputdirectory(restore=False)
1600 # Build output directory based on target(e.g. out/Release, out/Debug).
1601 target_build_output_dir = os.path.join(abs_build_dir, build_type)
1602 print 'Moving build from %s to %s' % (
1603 output_dir, target_build_output_dir)
1604 shutil.move(output_dir, target_build_output_dir)
1605 return True
1606 raise IOError('Missing extracted folder %s ' % output_dir)
1607 except Exception as e:
1608 print 'Somewthing went wrong while extracting archive file: %s' % e
1609 self.BackupOrRestoreOutputdirectory(restore=True)
1610 # Cleanup any leftovers from unzipping.
1611 if os.path.exists(output_dir):
1612 RmTreeAndMkDir(output_dir, skip_makedir=True)
1613 finally:
1614 # Delete downloaded archive
1615 if os.path.exists(downloaded_file):
1616 os.remove(downloaded_file)
1617 return False
  def PostBuildRequestAndWait(self, revision, condition, patch=None):
    """POSTs the build request job to the tryserver instance.

    Args:
      revision: SVN revision to build on the try server.
      condition: A callable polled until it returns a truthy value (i.e.
          the build is available) or the platform's build timeout elapses.
      patch: Optional DEPS patch to apply on the builder.

    Returns:
      The truthy result of |condition| once satisfied, otherwise False.
    """

    def GetBuilderNameAndBuildTime(target_arch='ia32'):
      """Gets builder bot name and buildtime in seconds based on platform."""
      # Bot names should match the one listed in tryserver.chromium's
      # master.cfg which produces builds for bisect.
      if IsWindows():
        if Is64BitWindows() and target_arch == 'x64':
          # NOTE(review): both Windows branches return the same bot;
          # presumably a dedicated x64 builder was intended — confirm.
          return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
        return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
      if IsLinux():
        return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
      if IsMac():
        return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME)
      raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)
    if not condition:
      return False

    bot_name, build_timeout = GetBuilderNameAndBuildTime(self.opts.target_arch)

    # Create a unique ID for each build request posted to try server builders.
    # This ID is added to "Reason" property in build's json.
    # TODO: Use this id to track the build status.
    build_request_id = GetSHA1HexDigest('%s-%s' % (revision, patch))

    # Creates a try job description.
    job_args = {'host': self.opts.builder_host,
                'port': self.opts.builder_port,
                'revision': 'src@%s' % revision,
                'bot': bot_name,
                'name': build_request_id
               }
    # Update patch information if supplied.
    if patch:
      job_args['patch'] = patch
    # Posts job to build the revision on the server.
    if post_perf_builder_job.PostTryJob(job_args):
      poll_interval = 60
      start_time = time.time()
      # Poll until the build appears or the per-platform timeout elapses.
      while True:
        res = condition()
        if res:
          return res
        elapsed_time = time.time() - start_time
        if elapsed_time > build_timeout:
          raise RuntimeError('Timed out while waiting %ds for %s build.' %
                             (build_timeout, revision))
        print ('Time elapsed: %ss, still waiting for %s build' %
               (elapsed_time, revision))
        time.sleep(poll_interval)
    return False
1672 def IsDownloadable(self, depot):
1673 """Checks if build is downloadable based on target platform and depot."""
1674 if self.opts.target_platform in ['chromium'] and self.opts.gs_bucket:
1675 return (depot == 'chromium' or
1676 'chromium' in DEPOT_DEPS_NAME[depot]['from'] or
1677 'v8' in DEPOT_DEPS_NAME[depot]['from'])
1678 return False
  def UpdateDeps(self, revision, depot, deps_file):
    """Updates DEPS file with new revision of dependency repository.

    This method search DEPS for a particular pattern in which depot revision
    is specified (e.g "webkit_revision": "123456"). If a match is found then
    it resolves the given git hash to SVN revision and replace it in DEPS
    file.

    Args:
      revision: A git hash revision of the dependency repository.
      depot: Current depot being bisected.
      deps_file: Path to DEPS file.

    Returns:
      True if DEPS file is modified successfully, otherwise False.
    """
    if not os.path.exists(deps_file):
      return False

    deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
    # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
    if not deps_var:
      print 'DEPS update not supported for Depot: %s', depot
      return False

    # Hack to Angle repository because, in DEPS file "vars" dictionary
    # variable contains "angle_revision" key that holds git hash instead of
    # SVN revision. And sometime "angle_revision" key is not specified in
    # "vars" variable, in such cases check "deps" dictionary variable that
    # matches angle.git@[a-fA-F0-9]{40}$ and replace git hash.
    if depot == 'angle':
      return self.UpdateDEPSForAngle(revision, depot, deps_file)

    try:
      deps_contents = ReadStringFromFile(deps_file)
      # Check whether the depot and revision pattern in DEPS file vars
      # e.g. for webkit the format is "webkit_revision": "12345".
      deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_var,
                                 re.MULTILINE)
      match = re.search(deps_revision, deps_contents)
      if match:
        # DEPS pins SVN revisions, so map the git hash back to one.
        svn_revision = self.source_control.SVNFindRev(
            revision, self._GetDepotDirectory(depot))
        if not svn_revision:
          print 'Could not determine SVN revision for %s' % revision
          return False
        # Update the revision information for the given depot
        new_data = re.sub(deps_revision, str(svn_revision), deps_contents)

        # For v8_bleeding_edge revisions change V8 branch in order
        # to fetch bleeding edge revision.
        if depot == 'v8_bleeding_edge':
          new_data = self.UpdateV8Branch(new_data)
          if not new_data:
            return False
        # Write changes to DEPS file
        WriteStringToFile(new_data, deps_file)
        return True
    except IOError, e:
      print 'Something went wrong while updating DEPS file. [%s]' % e
    return False
1741 def UpdateV8Branch(self, deps_content):
1742 """Updates V8 branch in DEPS file to process v8_bleeding_edge.
1744 Check for "v8_branch" in DEPS file if exists update its value
1745 with v8_bleeding_edge branch. Note: "v8_branch" is added to DEPS
1746 variable from DEPS revision 254916, therefore check for "src/v8":
1747 <v8 source path> in DEPS in order to support prior DEPS revisions
1748 and update it.
1750 Args:
1751 deps_content: DEPS file contents to be modified.
1753 Returns:
1754 Modified DEPS file contents as a string.
1756 new_branch = r'branches/bleeding_edge'
1757 v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")')
1758 if re.search(v8_branch_pattern, deps_content):
1759 deps_content = re.sub(v8_branch_pattern, new_branch, deps_content)
1760 else:
1761 # Replaces the branch assigned to "src/v8" key in DEPS file.
1762 # Format of "src/v8" in DEPS:
1763 # "src/v8":
1764 # (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
1765 # So, "/trunk@" is replace with "/branches/bleeding_edge@"
1766 v8_src_pattern = re.compile(
1767 r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE)
1768 if re.search(v8_src_pattern, deps_content):
1769 deps_content = re.sub(v8_src_pattern, new_branch, deps_content)
1770 return deps_content
1772 def UpdateDEPSForAngle(self, revision, depot, deps_file):
1773 """Updates DEPS file with new revision for Angle repository.
1775 This is a hack for Angle depot case because, in DEPS file "vars" dictionary
1776 variable contains "angle_revision" key that holds git hash instead of
1777 SVN revision.
1779 And sometimes "angle_revision" key is not specified in "vars" variable,
1780 in such cases check "deps" dictionary variable that matches
1781 angle.git@[a-fA-F0-9]{40}$ and replace git hash.
1783 deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
1784 try:
1785 deps_contents = ReadStringFromFile(deps_file)
1786 # Check whether the depot and revision pattern in DEPS file vars variable
1787 # e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
1788 angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
1789 deps_var, re.MULTILINE)
1790 match = re.search(angle_rev_pattern % deps_var, deps_contents)
1791 if match:
1792 # Update the revision information for the given depot
1793 new_data = re.sub(angle_rev_pattern, revision, deps_contents)
1794 else:
1795 # Check whether the depot and revision pattern in DEPS file deps
1796 # variable. e.g.,
1797 # "src/third_party/angle": Var("chromium_git") +
1798 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
1799 angle_rev_pattern = re.compile(
1800 r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE)
1801 match = re.search(angle_rev_pattern, deps_contents)
1802 if not match:
1803 print 'Could not find angle revision information in DEPS file.'
1804 return False
1805 new_data = re.sub(angle_rev_pattern, revision, deps_contents)
1806 # Write changes to DEPS file
1807 WriteStringToFile(new_data, deps_file)
1808 return True
1809 except IOError, e:
1810 print 'Something went wrong while updating DEPS file, %s' % e
1811 return False
  def CreateDEPSPatch(self, depot, revision):
    """Modifies DEPS and returns diff as text.

    Args:
      depot: Current depot being bisected.
      revision: A git hash revision of the dependency repository.

    Returns:
      A tuple with git hash of chromium revision and DEPS patch text.
    """
    deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
    if not os.path.exists(deps_file_path):
      raise RuntimeError('DEPS file does not exists.[%s]' % deps_file_path)
    # Get current chromium revision (git hash).
    chromium_sha = CheckRunGit(['rev-parse', 'HEAD']).strip()
    if not chromium_sha:
      raise RuntimeError('Failed to determine Chromium revision for %s' %
                         revision)
    if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or
        'v8' in DEPOT_DEPS_NAME[depot]['from']):
      # Checkout DEPS file for the current chromium revision.
      if self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS,
                                                    chromium_sha,
                                                    cwd=self.src_cwd):
        if self.UpdateDeps(revision, depot, deps_file_path):
          # Produce the patch in a form the try server can apply (paths
          # prefixed with src/, backslashes normalized).
          diff_command = ['diff',
                          '--src-prefix=src/',
                          '--dst-prefix=src/',
                          '--no-ext-diff',
                          bisect_utils.FILE_DEPS]
          diff_text = CheckRunGit(diff_command, cwd=self.src_cwd)
          return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
        else:
          raise RuntimeError('Failed to update DEPS file for chromium: [%s]' %
                             chromium_sha)
      else:
        raise RuntimeError('DEPS checkout Failed for chromium revision : [%s]' %
                           chromium_sha)
    return (None, None)
  def BuildCurrentRevision(self, depot, revision=None):
    """Builds chrome and performance_ui_tests on the current revision.

    Args:
      depot: Current depot being bisected.
      revision: A git hash for the current revision; used to fetch a
          prebuilt archive when a cloud storage bucket was supplied.

    Returns:
      True if the build was successful.
    """
    if self.opts.debug_ignore_build:
      return True
    cwd = os.getcwd()
    os.chdir(self.src_cwd)
    # Fetch build archive for the given revision from the cloud storage when
    # the storage bucket is passed.
    if self.IsDownloadable(depot) and revision:
      deps_patch = None
      if depot != 'chromium':
        # Create a DEPS patch with new revision for dependency repository.
        (revision, deps_patch) = self.CreateDEPSPatch(depot, revision)
      # Get SVN revision for the given SHA, since builds are archived using
      # SVN revision.
      chromium_revision = self.source_control.SVNFindRev(revision)
      if not chromium_revision:
        raise RuntimeError(
            'Failed to determine SVN revision for %s' % revision)
      if self.DownloadCurrentBuild(chromium_revision, patch=deps_patch):
        os.chdir(cwd)
        if deps_patch:
          # Reverts the changes to DEPS file.
          self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS,
                                                     revision,
                                                     cwd=self.src_cwd)
        return True
      raise RuntimeError('Failed to download build archive for revision %s.\n'
                         'Unfortunately, bisection couldn\'t continue any '
                         'further. Please try running script without '
                         '--gs_bucket flag to produce local builds.' % revision)

    # No archive available; fall back to a local build.
    build_success = self.builder.Build(depot, self.opts)
    os.chdir(cwd)
    return build_success
1894 def RunGClientHooks(self):
1895 """Runs gclient with runhooks command.
1897 Returns:
1898 True if gclient reports no errors.
1901 if self.opts.debug_ignore_build:
1902 return True
1904 return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
1906 def TryParseHistogramValuesFromOutput(self, metric, text):
1907 """Attempts to parse a metric in the format HISTOGRAM <graph: <trace>.
1909 Args:
1910 metric: The metric as a list of [<trace>, <value>] strings.
1911 text: The text to parse the metric values from.
1913 Returns:
1914 A list of floating point numbers found.
1916 metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])
1918 text_lines = text.split('\n')
1919 values_list = []
1921 for current_line in text_lines:
1922 if metric_formatted in current_line:
1923 current_line = current_line[len(metric_formatted):]
1925 try:
1926 histogram_values = eval(current_line)
1928 for b in histogram_values['buckets']:
1929 average_for_bucket = float(b['high'] + b['low']) * 0.5
1930 # Extends the list with N-elements with the average for that bucket.
1931 values_list.extend([average_for_bucket] * b['count'])
1932 except:
1933 pass
1935 return values_list
1937 def TryParseResultValuesFromOutput(self, metric, text):
1938 """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...
1940 Args:
1941 metric: The metric as a list of [<trace>, <value>] strings.
1942 text: The text to parse the metric values from.
1944 Returns:
1945 A list of floating point numbers found.
1947 # Format is: RESULT <graph>: <trace>= <value> <units>
1948 metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))
1950 # The log will be parsed looking for format:
1951 # <*>RESULT <graph_name>: <trace_name>= <value>
1952 single_result_re = re.compile(
1953 metric_re + '\s*(?P<VALUE>[-]?\d*(\.\d*)?)')
1955 # The log will be parsed looking for format:
1956 # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
1957 multi_results_re = re.compile(
1958 metric_re + '\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')
1960 # The log will be parsed looking for format:
1961 # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
1962 mean_stddev_re = re.compile(
1963 metric_re +
1964 '\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')
1966 text_lines = text.split('\n')
1967 values_list = []
1968 for current_line in text_lines:
1969 # Parse the output from the performance test for the metric we're
1970 # interested in.
1971 single_result_match = single_result_re.search(current_line)
1972 multi_results_match = multi_results_re.search(current_line)
1973 mean_stddev_match = mean_stddev_re.search(current_line)
1974 if (not single_result_match is None and
1975 single_result_match.group('VALUE')):
1976 values_list += [single_result_match.group('VALUE')]
1977 elif (not multi_results_match is None and
1978 multi_results_match.group('VALUES')):
1979 metric_values = multi_results_match.group('VALUES')
1980 values_list += metric_values.split(',')
1981 elif (not mean_stddev_match is None and
1982 mean_stddev_match.group('MEAN')):
1983 values_list += [mean_stddev_match.group('MEAN')]
1985 values_list = [float(v) for v in values_list if IsStringFloat(v)]
1987 # If the metric is times/t, we need to sum the timings in order to get
1988 # similar regression results as the try-bots.
1989 metrics_to_sum = [['times', 't'], ['times', 'page_load_time'],
1990 ['cold_times', 'page_load_time'], ['warm_times', 'page_load_time']]
1992 if metric in metrics_to_sum:
1993 if values_list:
1994 values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]
1996 return values_list
1998 def ParseMetricValuesFromOutput(self, metric, text):
1999 """Parses output from performance_ui_tests and retrieves the results for
2000 a given metric.
2002 Args:
2003 metric: The metric as a list of [<trace>, <value>] strings.
2004 text: The text to parse the metric values from.
2006 Returns:
2007 A list of floating point numbers found.
2009 metric_values = self.TryParseResultValuesFromOutput(metric, text)
2011 if not metric_values:
2012 metric_values = self.TryParseHistogramValuesFromOutput(metric, text)
2014 return metric_values
2016 def _GenerateProfileIfNecessary(self, command_args):
2017 """Checks the command line of the performance test for dependencies on
2018 profile generation, and runs tools/perf/generate_profile as necessary.
2020 Args:
2021 command_args: Command line being passed to performance test, as a list.
2023 Returns:
2024 False if profile generation was necessary and failed, otherwise True.
2027 if '--profile-dir' in ' '.join(command_args):
2028 # If we were using python 2.7+, we could just use the argparse
2029 # module's parse_known_args to grab --profile-dir. Since some of the
2030 # bots still run 2.6, have to grab the arguments manually.
2031 arg_dict = {}
2032 args_to_parse = ['--profile-dir', '--browser']
2034 for arg_to_parse in args_to_parse:
2035 for i, current_arg in enumerate(command_args):
2036 if arg_to_parse in current_arg:
2037 current_arg_split = current_arg.split('=')
2039 # Check 2 cases, --arg=<val> and --arg <val>
2040 if len(current_arg_split) == 2:
2041 arg_dict[arg_to_parse] = current_arg_split[1]
2042 elif i + 1 < len(command_args):
2043 arg_dict[arg_to_parse] = command_args[i+1]
2045 path_to_generate = os.path.join('tools', 'perf', 'generate_profile')
2047 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'):
2048 profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
2049 return not RunProcess(['python', path_to_generate,
2050 '--profile-type-to-generate', profile_type,
2051 '--browser', arg_dict['--browser'], '--output-dir', profile_path])
2052 return False
2053 return True
2055 def _IsBisectModeUsingMetric(self):
2056 return self.opts.bisect_mode in [BISECT_MODE_MEAN, BISECT_MODE_STD_DEV]
2058 def _IsBisectModeReturnCode(self):
2059 return self.opts.bisect_mode in [BISECT_MODE_RETURN_CODE]
2061 def _IsBisectModeStandardDeviation(self):
2062 return self.opts.bisect_mode in [BISECT_MODE_STD_DEV]
  def RunPerformanceTestAndParseResults(
      self, command_to_run, metric, reset_on_first_run=False,
      upload_on_last_run=False, results_label=None):
    """Runs a performance test on the current revision and parses the results.

    Args:
      command_to_run: The command to be run to execute the performance test.
      metric: The metric to parse out from the results of the performance
          test. This is the result chart name and trace name, separated by
          slash.
      reset_on_first_run: If True, pass the flag --reset-results on first run.
      upload_on_last_run: If True, pass the flag --upload-results on last run.
      results_label: A value for the option flag --results-label.
          The arguments reset_on_first_run, upload_on_last_run and
          results_label are all ignored if the test is not a Telemetry test.

    Returns:
      (values dict, 0) if --debug_ignore_perf_test was passed.
      (values dict, 0, test output) if the test was run successfully.
      (error message, -1) if the test couldn't be run.
      (error message, -1, test output) if the test ran but there was an error.
    """
    success_code, failure_code = 0, -1

    # Short-circuit with zeroed fake results when the perf test is disabled
    # for debugging; note this 2-tuple has no output element.
    if self.opts.debug_ignore_perf_test:
      fake_results = {
          'mean': 0.0,
          'std_err': 0.0,
          'std_dev': 0.0,
          'values': [0.0]
      }
      return (fake_results, success_code)

    # For Windows platform set posix=False, to parse windows paths correctly.
    # On Windows, path separators '\' or '\\' are replace by '' when
    # posix=True, refer to http://bugs.python.org/issue1724822. By default
    # posix=True.
    args = shlex.split(command_to_run, posix=not IsWindows())

    if not self._GenerateProfileIfNecessary(args):
      err_text = 'Failed to generate profile for performance test.'
      return (err_text, failure_code)

    # If running a Telemetry test for Chrome OS, insert the remote IP and
    # identity parameters.
    is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
    if self.opts.target_platform == 'cros' and is_telemetry:
      args.append('--remote=%s' % self.opts.cros_remote_ip)
      args.append('--identity=%s' % CROS_TEST_KEY_PATH)

    start_time = time.time()

    metric_values = []
    output_of_all_runs = ''
    # Repeat the test up to repeat_test_count times, bounded by
    # max_time_minutes of wall-clock time (checked after each run).
    for i in xrange(self.opts.repeat_test_count):
      # Can ignore the return code since if the tests fail, it won't return 0.
      current_args = copy.copy(args)
      if is_telemetry:
        if i == 0 and reset_on_first_run:
          current_args.append('--reset-results')
        elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
          current_args.append('--upload-results')
        if results_label:
          current_args.append('--results-label=%s' % results_label)
      try:
        (output, return_code) = RunProcessAndRetrieveOutput(current_args,
            cwd=self.src_cwd)
      except OSError, e:
        # ENOENT means the test binary itself wasn't found; anything else is
        # unexpected and re-raised.
        if e.errno == errno.ENOENT:
          err_text = ('Something went wrong running the performance test. '
                      'Please review the command line:\n\n')
          if 'src/' in ' '.join(args):
            err_text += ('Check that you haven\'t accidentally specified a '
                         'path with src/ in the command.\n\n')
          err_text += ' '.join(args)
          err_text += '\n'

          return (err_text, failure_code)
        raise

      output_of_all_runs += output
      if self.opts.output_buildbot_annotations:
        print output

      if self._IsBisectModeUsingMetric():
        metric_values += self.ParseMetricValuesFromOutput(metric, output)
        # If we're bisecting on a metric (ie, changes in the mean or
        # standard deviation) and no metric values are produced, bail out.
        if not metric_values:
          break
      elif self._IsBisectModeReturnCode():
        metric_values.append(return_code)

      elapsed_minutes = (time.time() - start_time) / 60.0
      if elapsed_minutes >= self.opts.max_time_minutes:
        break

    if len(metric_values) == 0:
      err_text = 'Metric %s was not found in the test output.' % metric
      # TODO(qyearsley): Consider also getting and displaying a list of
      # metrics that were found in the output here.
      return (err_text, failure_code, output_of_all_runs)

    # If we're bisecting on return codes, we're really just looking for zero
    # vs non-zero.
    if self._IsBisectModeReturnCode():
      # If any of the return codes is non-zero, output 1.
      overall_return_code = 0 if (
          all(current_value == 0 for current_value in metric_values)) else 1

      values = {
          'mean': overall_return_code,
          'std_err': 0.0,
          'std_dev': 0.0,
          'values': metric_values,
      }

      print 'Results of performance test: Command returned with %d' % (
          overall_return_code)
      print
    else:
      # Need to get the average value if there were multiple values.
      truncated_mean = CalculateTruncatedMean(metric_values,
          self.opts.truncate_percent)
      standard_err = CalculateStandardError(metric_values)
      standard_dev = CalculateStandardDeviation(metric_values)

      # In std-dev mode the bisect compares std devs, so the values list is
      # replaced with the single computed standard deviation.
      if self._IsBisectModeStandardDeviation():
        metric_values = [standard_dev]

      values = {
          'mean': truncated_mean,
          'std_err': standard_err,
          'std_dev': standard_dev,
          'values': metric_values,
      }

      print 'Results of performance test: %12f %12f' % (
          truncated_mean, standard_err)
      print
    return (values, success_code, output_of_all_runs)
  def FindAllRevisionsToSync(self, revision, depot):
    """Finds all dependant revisions and depots that need to be synced for a
    given revision. This is only useful in the git workflow, as an svn depot
    may be split into multiple mirrors.

    ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
    skia/include. To sync skia/src properly, one has to find the proper
    revisions in skia/gyp and skia/include.

    Args:
      revision: The revision to sync to.
      depot: The depot in use at the moment (probably skia).

    Returns:
      A list of [depot, revision] pairs that need to be synced, or None if a
      revision could not be resolved for every dependant depot.
    """
    revisions_to_sync = [[depot, revision]]

    is_base = ((depot == 'chromium') or (depot == 'cros') or
        (depot == 'android-chrome'))

    # Some SVN depots were split into multiple git depots, so we need to
    # figure out for each mirror which git revision to grab. There's no
    # guarantee that the SVN revision will exist for each of the dependant
    # depots, so we have to grep the git logs and grab the next earlier one.
    if not is_base and\
       DEPOT_DEPS_NAME[depot]['depends'] and\
       self.source_control.IsGit():
      svn_rev = self.source_control.SVNFindRev(revision)

      for d in DEPOT_DEPS_NAME[depot]['depends']:
        # Each lookup happens inside that depot's working directory; the cwd
        # is restored to |depot| below.
        self.ChangeToDepotWorkingDirectory(d)

        dependant_rev = self.source_control.ResolveToRevision(svn_rev, d, -1000)

        if dependant_rev:
          revisions_to_sync.append([d, dependant_rev])

      num_resolved = len(revisions_to_sync)
      num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])

      self.ChangeToDepotWorkingDirectory(depot)

      # -1 accounts for the [depot, revision] entry added up front; every
      # dependant depot must have resolved or the sync would be inconsistent.
      if not ((num_resolved - 1) == num_needed):
        return None

    return revisions_to_sync
2252 def PerformPreBuildCleanup(self):
2253 """Performs necessary cleanup between runs."""
2254 print 'Cleaning up between runs.'
2255 print
2257 # Having these pyc files around between runs can confuse the
2258 # perf tests and cause them to crash.
2259 for (path, _, files) in os.walk(self.src_cwd):
2260 for cur_file in files:
2261 if cur_file.endswith('.pyc'):
2262 path_to_file = os.path.join(path, cur_file)
2263 os.remove(path_to_file)
  def PerformWebkitDirectoryCleanup(self, revision):
    """If the script is switching between Blink and WebKit during bisect,
    its faster to just delete the directory rather than leave it up to git
    to sync.

    Args:
      revision: The revision whose .DEPS.git is inspected to decide whether
          that revision uses Blink or WebKit.

    Returns:
      True if successful.
    """
    # Temporarily check out .DEPS.git at |revision| so we can inspect it; it
    # is reverted below before any directory is removed.
    if not self.source_control.CheckoutFileAtRevision(
        bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
      return False

    cwd = os.getcwd()
    os.chdir(self.src_cwd)

    is_blink = bisect_utils.IsDepsFileBlink()

    os.chdir(cwd)

    if not self.source_control.RevertFileToHead(
        bisect_utils.FILE_DEPS_GIT):
      return False

    # Only wipe the directory when the Blink/WebKit choice actually flipped
    # since the last revision we looked at.
    if self.was_blink != is_blink:
      self.was_blink = is_blink
      # Removes third_party/Webkit directory.
      return bisect_utils.RemoveThirdPartyDirectory('Webkit')
    return True
2294 def PerformCrosChrootCleanup(self):
2295 """Deletes the chroot.
2297 Returns:
2298 True if successful.
2300 cwd = os.getcwd()
2301 self.ChangeToDepotWorkingDirectory('cros')
2302 cmd = [CROS_SDK_PATH, '--delete']
2303 return_code = RunProcess(cmd)
2304 os.chdir(cwd)
2305 return not return_code
2307 def CreateCrosChroot(self):
2308 """Creates a new chroot.
2310 Returns:
2311 True if successful.
2313 cwd = os.getcwd()
2314 self.ChangeToDepotWorkingDirectory('cros')
2315 cmd = [CROS_SDK_PATH, '--create']
2316 return_code = RunProcess(cmd)
2317 os.chdir(cwd)
2318 return not return_code
2320 def PerformPreSyncCleanup(self, revision, depot):
2321 """Performs any necessary cleanup before syncing.
2323 Returns:
2324 True if successful.
2326 if depot == 'chromium':
2327 # Removes third_party/libjingle. At some point, libjingle was causing
2328 # issues syncing when using the git workflow (crbug.com/266324).
2329 if not bisect_utils.RemoveThirdPartyDirectory('libjingle'):
2330 return False
2331 # Removes third_party/skia. At some point, skia was causing
2332 # issues syncing when using the git workflow (crbug.com/377951).
2333 if not bisect_utils.RemoveThirdPartyDirectory('skia'):
2334 return False
2335 return self.PerformWebkitDirectoryCleanup(revision)
2336 elif depot == 'cros':
2337 return self.PerformCrosChrootCleanup()
2338 return True
2340 def RunPostSync(self, depot):
2341 """Performs any work after syncing.
2343 Returns:
2344 True if successful.
2346 if self.opts.target_platform == 'android':
2347 if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
2348 path_to_src=self.src_cwd):
2349 return False
2351 if depot == 'cros':
2352 return self.CreateCrosChroot()
2353 else:
2354 return self.RunGClientHooks()
2355 return True
2357 def ShouldSkipRevision(self, depot, revision):
2358 """Some commits can be safely skipped (such as a DEPS roll), since the tool
2359 is git based those changes would have no effect.
2361 Args:
2362 depot: The depot being bisected.
2363 revision: Current revision we're synced to.
2365 Returns:
2366 True if we should skip building/testing this revision.
2368 if depot == 'chromium':
2369 if self.source_control.IsGit():
2370 cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
2371 output = CheckRunGit(cmd)
2373 files = output.splitlines()
2375 if len(files) == 1 and files[0] == 'DEPS':
2376 return True
2378 return False
  def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
                              skippable=False):
    """Performs a full sync/build/run of the specified revision.

    Args:
      revision: The revision to sync to.
      depot: The depot that's being used at the moment (src, webkit, etc.)
      command_to_run: The command to execute the performance test.
      metric: The performance metric being tested.
      skippable: If True, revisions that change only DEPS may be skipped.

    Returns:
      On success, a tuple containing the results of the performance test.
      Otherwise, a tuple with the error message.
    """
    sync_client = None
    if depot == 'chromium' or depot == 'android-chrome':
      sync_client = 'gclient'
    elif depot == 'cros':
      sync_client = 'repo'

    revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)

    if not revisions_to_sync:
      return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)

    if not self.PerformPreSyncCleanup(revision, depot):
      return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)

    success = True

    if not self.opts.debug_ignore_sync:
      for r in revisions_to_sync:
        # r is a [depot, revision] pair from FindAllRevisionsToSync.
        self.ChangeToDepotWorkingDirectory(r[0])

        if sync_client:
          self.PerformPreBuildCleanup()

        # If you're using gclient to sync, you need to specify the depot you
        # want so that all the dependencies sync properly as well.
        # ie. gclient sync src@<SHA1>
        current_revision = r[1]
        if sync_client == 'gclient':
          current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
              current_revision)
        if not self.source_control.SyncToRevision(current_revision,
            sync_client):
          success = False

          break

    if success:
      success = self.RunPostSync(depot)
      if success:
        if skippable and self.ShouldSkipRevision(depot, revision):
          return ('Skipped revision: [%s]' % str(revision),
              BUILD_RESULT_SKIPPED)

        # Build and test times are measured separately so callers can report
        # both (see the 5-tuple returned below).
        start_build_time = time.time()
        if self.BuildCurrentRevision(depot, revision):
          after_build_time = time.time()
          results = self.RunPerformanceTestAndParseResults(command_to_run,
              metric)
          # Restore build output directory once the tests are done, to avoid
          # any descrepancy.
          if self.IsDownloadable(depot) and revision:
            self.BackupOrRestoreOutputdirectory(restore=True)

          if results[1] == 0:
            external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision(
                depot, revision)

            if not external_revisions is None:
              # (values, status, external revisions, perf time, build time)
              return (results[0], results[1], external_revisions,
                  time.time() - after_build_time, after_build_time -
                  start_build_time)
            else:
              return ('Failed to parse DEPS file for external revisions.',
                  BUILD_RESULT_FAIL)
          else:
            return results
        else:
          return ('Failed to build revision: [%s]' % (str(revision, )),
              BUILD_RESULT_FAIL)
      else:
        return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
    else:
      return ('Failed to sync revision: [%s]' % (str(revision, )),
          BUILD_RESULT_FAIL)
2469 def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
2470 """Given known good and bad values, decide if the current_value passed
2471 or failed.
2473 Args:
2474 current_value: The value of the metric being checked.
2475 known_bad_value: The reference value for a "failed" run.
2476 known_good_value: The reference value for a "passed" run.
2478 Returns:
2479 True if the current_value is closer to the known_good_value than the
2480 known_bad_value.
2482 if self.opts.bisect_mode == BISECT_MODE_STD_DEV:
2483 dist_to_good_value = abs(current_value['std_dev'] -
2484 known_good_value['std_dev'])
2485 dist_to_bad_value = abs(current_value['std_dev'] -
2486 known_bad_value['std_dev'])
2487 else:
2488 dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
2489 dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])
2491 return dist_to_good_value < dist_to_bad_value
2493 def _GetDepotDirectory(self, depot_name):
2494 if depot_name == 'chromium':
2495 return self.src_cwd
2496 elif depot_name == 'cros':
2497 return self.cros_cwd
2498 elif depot_name in DEPOT_NAMES:
2499 return self.depot_cwd[depot_name]
2500 else:
2501 assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
2502 ' was added without proper support?' % depot_name
2504 def ChangeToDepotWorkingDirectory(self, depot_name):
2505 """Given a depot, changes to the appropriate working directory.
2507 Args:
2508 depot_name: The name of the depot (see DEPOT_NAMES).
2510 os.chdir(self._GetDepotDirectory(depot_name))
  def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
    """Fills in v8_bleeding_edge revisions for both ends of the bisect range.

    Searches forward from the min revision and backward from the max revision
    so the resulting bleeding_edge range brackets the trunk range.

    Args:
      min_revision_data: Revision data dict for the range's earliest revision;
          its 'external' dict is updated in place.
      max_revision_data: Revision data dict for the range's latest revision;
          its 'external' dict is updated in place.
    """
    r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'],
        search_forward=True)
    r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'],
        search_forward=False)
    min_revision_data['external']['v8_bleeding_edge'] = r1
    max_revision_data['external']['v8_bleeding_edge'] = r2

    # If either endpoint had no direct trunk->bleeding_edge mapping, the
    # range was widened by the nearest-match search above, so warn that the
    # bleeding_edge results may be approximate.
    if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
            min_revision_data['revision']) or
        not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
            max_revision_data['revision'])):
      self.warnings.append('Trunk revisions in V8 did not map directly to '
          'bleeding_edge. Attempted to expand the range to find V8 rolls which '
          'did map directly to bleeding_edge revisions, but results might not '
          'be valid.')
2529 def _FindNextDepotToBisect(self, current_depot, current_revision,
2530 min_revision_data, max_revision_data):
2531 """Given the state of the bisect, decides which depot the script should
2532 dive into next (if any).
2534 Args:
2535 current_depot: Current depot being bisected.
2536 current_revision: Current revision synced to.
2537 min_revision_data: Data about the earliest revision in the bisect range.
2538 max_revision_data: Data about the latest revision in the bisect range.
2540 Returns:
2541 The depot to bisect next, or None.
2543 external_depot = None
2544 for next_depot in DEPOT_NAMES:
2545 if DEPOT_DEPS_NAME[next_depot].has_key('platform'):
2546 if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
2547 continue
2549 if not (DEPOT_DEPS_NAME[next_depot]["recurse"] and
2550 min_revision_data['depot'] in DEPOT_DEPS_NAME[next_depot]['from']):
2551 continue
2553 if current_depot == 'v8':
2554 # We grab the bleeding_edge info here rather than earlier because we
2555 # finally have the revision range. From that we can search forwards and
2556 # backwards to try to match trunk revisions to bleeding_edge.
2557 self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)
2559 if (min_revision_data['external'].get(next_depot) ==
2560 max_revision_data['external'].get(next_depot)):
2561 continue
2563 if (min_revision_data['external'].get(next_depot) and
2564 max_revision_data['external'].get(next_depot)):
2565 external_depot = next_depot
2566 break
2568 return external_depot
  def PrepareToBisectOnDepot(self,
                             current_depot,
                             end_revision,
                             start_revision,
                             previous_depot,
                             previous_revision):
    """Changes to the appropriate directory and gathers a list of revisions
    to bisect between |start_revision| and |end_revision|.

    Args:
      current_depot: The depot we want to bisect.
      end_revision: End of the revision range.
      start_revision: Start of the revision range.
      previous_depot: The depot we were previously bisecting.
      previous_revision: The last revision we synced to on |previous_depot|.

    Returns:
      A list containing the revisions between |start_revision| and
      |end_revision| inclusive. An empty list signals failure.
    """
    # Change into working directory of external library to run
    # subsequent commands.
    self.ChangeToDepotWorkingDirectory(current_depot)

    # V8 (and possibly others) is merged in periodically. Bisecting
    # this directory directly won't give much good info.
    # NOTE(review): dict.has_key is deprecated; 'in' is the preferred form.
    if DEPOT_DEPS_NAME[current_depot].has_key('custom_deps'):
      config_path = os.path.join(self.src_cwd, '..')
      if bisect_utils.RunGClientAndCreateConfig(self.opts,
          DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
        return []
      if bisect_utils.RunGClient(
          ['sync', '--revision', previous_revision], cwd=self.src_cwd):
        return []

    if current_depot == 'v8_bleeding_edge':
      self.ChangeToDepotWorkingDirectory('chromium')

      # Swap the bleeding_edge checkout into the v8 directory so the build
      # picks it up; the original v8 is parked at v8.bak and both moves are
      # undone via cleanup_commands when the bisect finishes.
      shutil.move('v8', 'v8.bak')
      shutil.move('v8_bleeding_edge', 'v8')

      self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
      self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])

      self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
      self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')

      self.ChangeToDepotWorkingDirectory(current_depot)

    depot_revision_list = self.GetRevisionList(current_depot,
        end_revision,
        start_revision)

    self.ChangeToDepotWorkingDirectory('chromium')

    return depot_revision_list
2627 def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
2628 """Gathers reference values by running the performance tests on the
2629 known good and bad revisions.
2631 Args:
2632 good_rev: The last known good revision where the performance regression
2633 has not occurred yet.
2634 bad_rev: A revision where the performance regression has already occurred.
2635 cmd: The command to execute the performance test.
2636 metric: The metric being tested for regression.
2638 Returns:
2639 A tuple with the results of building and running each revision.
2641 bad_run_results = self.SyncBuildAndRunRevision(bad_rev,
2642 target_depot,
2643 cmd,
2644 metric)
2646 good_run_results = None
2648 if not bad_run_results[1]:
2649 good_run_results = self.SyncBuildAndRunRevision(good_rev,
2650 target_depot,
2651 cmd,
2652 metric)
2654 return (bad_run_results, good_run_results)
2656 def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
2657 """Adds new revisions to the revision_data dict and initializes them.
2659 Args:
2660 revisions: List of revisions to add.
2661 depot: Depot that's currently in use (src, webkit, etc...)
2662 sort: Sorting key for displaying revisions.
2663 revision_data: A dict to add the new revisions into. Existing revisions
2664 will have their sort keys offset.
2667 num_depot_revisions = len(revisions)
2669 for _, v in revision_data.iteritems():
2670 if v['sort'] > sort:
2671 v['sort'] += num_depot_revisions
2673 for i in xrange(num_depot_revisions):
2674 r = revisions[i]
2676 revision_data[r] = {'revision' : r,
2677 'depot' : depot,
2678 'value' : None,
2679 'perf_time' : 0,
2680 'build_time' : 0,
2681 'passed' : '?',
2682 'sort' : i + sort + 1}
  def PrintRevisionsToBisectMessage(self, revision_list, depot):
    """Prints the revision range to be bisected, optionally wrapped in
    buildbot annotations.

    Args:
      revision_list: Revisions to print, newest first.
      depot: Name of the depot the revisions belong to.
    """
    if self.opts.output_buildbot_annotations:
      # revision_list is ordered newest-first, so the range reads
      # [oldest - newest].
      step_name = 'Bisection Range: [%s - %s]' % (
          revision_list[len(revision_list)-1], revision_list[0])
      bisect_utils.OutputAnnotationStepStart(step_name)

    print
    print 'Revisions to bisect on [%s]:' % depot
    for revision_id in revision_list:
      print ' -> %s' % (revision_id, )
    print

    if self.opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
  def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
    """Checks to see if changes to DEPS file occurred, and that the revision
    range also includes the change to .DEPS.git. If it doesn't, attempts to
    expand the revision range to include it.

    Args:
      bad_revision: First known bad revision.
      good_revision: Last known good revision.

    Returns:
      A tuple with the new bad and good revisions.
    """
    if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
      changes_to_deps = self.source_control.QueryFileRevisionHistory(
          'DEPS', good_revision, bad_revision)

      if changes_to_deps:
        # DEPS file was changed, search from the oldest change to DEPS file to
        # bad_revision to see if there are matching .DEPS.git changes.
        oldest_deps_change = changes_to_deps[-1]
        changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
            bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)

        # A mismatch means some DEPS change has no corresponding .DEPS.git
        # commit inside the current range.
        if len(changes_to_deps) != len(changes_to_gitdeps):
          # Grab the timestamp of the last DEPS change
          cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
          output = CheckRunGit(cmd)
          commit_time = int(output)

          # Try looking for a commit that touches the .DEPS.git file in the
          # next 15 minutes after the DEPS file change.
          cmd = ['log', '--format=%H', '-1',
              '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
              'origin/master', bisect_utils.FILE_DEPS_GIT]
          output = CheckRunGit(cmd)
          output = output.strip()
          if output:
            self.warnings.append('Detected change to DEPS and modified '
                'revision range to include change to .DEPS.git')
            # The .DEPS.git commit becomes the new bad endpoint.
            return (output, good_revision)
          else:
            self.warnings.append('Detected change to DEPS but couldn\'t find '
                'matching change to .DEPS.git')
    return (bad_revision, good_revision)
2744 def CheckIfRevisionsInProperOrder(self,
2745 target_depot,
2746 good_revision,
2747 bad_revision):
2748 """Checks that |good_revision| is an earlier revision than |bad_revision|.
2750 Args:
2751 good_revision: Number/tag of the known good revision.
2752 bad_revision: Number/tag of the known bad revision.
2754 Returns:
2755 True if the revisions are in the proper order (good earlier than bad).
2757 if self.source_control.IsGit() and target_depot != 'cros':
2758 cmd = ['log', '--format=%ct', '-1', good_revision]
2759 cwd = self._GetDepotDirectory(target_depot)
2761 output = CheckRunGit(cmd, cwd=cwd)
2762 good_commit_time = int(output)
2764 cmd = ['log', '--format=%ct', '-1', bad_revision]
2765 output = CheckRunGit(cmd, cwd=cwd)
2766 bad_commit_time = int(output)
2768 return good_commit_time <= bad_commit_time
2769 else:
2770 # Cros/svn use integers
2771 return int(good_revision) <= int(bad_revision)
2773 def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
2774 """Given known good and bad revisions, run a binary search on all
2775 intermediate revisions to determine the CL where the performance regression
2776 occurred.
2778 Args:
2779 command_to_run: Specify the command to execute the performance test.
2780 good_revision: Number/tag of the known good revision.
2781 bad_revision: Number/tag of the known bad revision.
2782 metric: The performance metric to monitor.
2784 Returns:
2785 A dict with 2 members, 'revision_data' and 'error'. On success,
2786 'revision_data' will contain a dict mapping revision ids to
2787 data about that revision. Each piece of revision data consists of a
2788 dict with the following keys:
2790 'passed': Represents whether the performance test was successful at
2791 that revision. Possible values include: 1 (passed), 0 (failed),
2792 '?' (skipped), 'F' (build failed).
2793 'depot': The depot that this revision is from (ie. WebKit)
2794 'external': If the revision is a 'src' revision, 'external' contains
2795 the revisions of each of the external libraries.
2796 'sort': A sort value for sorting the dict in order of commits.
2798 For example:
2800 'error':None,
2801 'revision_data':
2803 'CL #1':
2805 'passed':False,
2806 'depot':'chromium',
2807 'external':None,
2808 'sort':0
2813 If an error occurred, the 'error' field will contain the message and
2814 'revision_data' will be empty.
2816 results = {'revision_data' : {},
2817 'error' : None}
2819 # Choose depot to bisect first
2820 target_depot = 'chromium'
2821 if self.opts.target_platform == 'cros':
2822 target_depot = 'cros'
2823 elif self.opts.target_platform == 'android-chrome':
2824 target_depot = 'android-chrome'
2826 cwd = os.getcwd()
2827 self.ChangeToDepotWorkingDirectory(target_depot)
2829 # If they passed SVN CL's, etc... we can try match them to git SHA1's.
2830 bad_revision = self.source_control.ResolveToRevision(bad_revision_in,
2831 target_depot, 100)
2832 good_revision = self.source_control.ResolveToRevision(good_revision_in,
2833 target_depot, -100)
2835 os.chdir(cwd)
2838 if bad_revision is None:
2839 results['error'] = 'Could\'t resolve [%s] to SHA1.' % (bad_revision_in,)
2840 return results
2842 if good_revision is None:
2843 results['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in,)
2844 return results
2846 # Check that they didn't accidentally swap good and bad revisions.
2847 if not self.CheckIfRevisionsInProperOrder(
2848 target_depot, good_revision, bad_revision):
2849 results['error'] = 'bad_revision < good_revision, did you swap these '\
2850 'by mistake?'
2851 return results
2853 (bad_revision, good_revision) = self.NudgeRevisionsIfDEPSChange(
2854 bad_revision, good_revision)
2856 if self.opts.output_buildbot_annotations:
2857 bisect_utils.OutputAnnotationStepStart('Gathering Revisions')
2859 print 'Gathering revision range for bisection.'
2860 # Retrieve a list of revisions to do bisection on.
2861 src_revision_list = self.GetRevisionList(target_depot,
2862 bad_revision,
2863 good_revision)
2865 if self.opts.output_buildbot_annotations:
2866 bisect_utils.OutputAnnotationStepClosed()
2868 if src_revision_list:
2869 # revision_data will store information about a revision such as the
2870 # depot it came from, the webkit/V8 revision at that time,
2871 # performance timing, build state, etc...
2872 revision_data = results['revision_data']
2874 # revision_list is the list we're binary searching through at the moment.
2875 revision_list = []
2877 sort_key_ids = 0
2879 for current_revision_id in src_revision_list:
2880 sort_key_ids += 1
2882 revision_data[current_revision_id] = {'value' : None,
2883 'passed' : '?',
2884 'depot' : target_depot,
2885 'external' : None,
2886 'perf_time' : 0,
2887 'build_time' : 0,
2888 'sort' : sort_key_ids}
2889 revision_list.append(current_revision_id)
2891 min_revision = 0
2892 max_revision = len(revision_list) - 1
2894 self.PrintRevisionsToBisectMessage(revision_list, target_depot)
2896 if self.opts.output_buildbot_annotations:
2897 bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')
2899 print 'Gathering reference values for bisection.'
2901 # Perform the performance tests on the good and bad revisions, to get
2902 # reference values.
2903 (bad_results, good_results) = self.GatherReferenceValues(good_revision,
2904 bad_revision,
2905 command_to_run,
2906 metric,
2907 target_depot)
2909 if self.opts.output_buildbot_annotations:
2910 bisect_utils.OutputAnnotationStepClosed()
2912 if bad_results[1]:
2913 results['error'] = ('An error occurred while building and running '
2914 'the \'bad\' reference value. The bisect cannot continue without '
2915 'a working \'bad\' revision to start from.\n\nError: %s' %
2916 bad_results[0])
2917 return results
2919 if good_results[1]:
2920 results['error'] = ('An error occurred while building and running '
2921 'the \'good\' reference value. The bisect cannot continue without '
2922 'a working \'good\' revision to start from.\n\nError: %s' %
2923 good_results[0])
2924 return results
2927 # We need these reference values to determine if later runs should be
2928 # classified as pass or fail.
2929 known_bad_value = bad_results[0]
2930 known_good_value = good_results[0]
2932 # Can just mark the good and bad revisions explicitly here since we
2933 # already know the results.
2934 bad_revision_data = revision_data[revision_list[0]]
2935 bad_revision_data['external'] = bad_results[2]
2936 bad_revision_data['perf_time'] = bad_results[3]
2937 bad_revision_data['build_time'] = bad_results[4]
2938 bad_revision_data['passed'] = False
2939 bad_revision_data['value'] = known_bad_value
2941 good_revision_data = revision_data[revision_list[max_revision]]
2942 good_revision_data['external'] = good_results[2]
2943 good_revision_data['perf_time'] = good_results[3]
2944 good_revision_data['build_time'] = good_results[4]
2945 good_revision_data['passed'] = True
2946 good_revision_data['value'] = known_good_value
2948 next_revision_depot = target_depot
2950 while True:
2951 if not revision_list:
2952 break
2954 min_revision_data = revision_data[revision_list[min_revision]]
2955 max_revision_data = revision_data[revision_list[max_revision]]
2957 if max_revision - min_revision <= 1:
2958 current_depot = min_revision_data['depot']
2959 if min_revision_data['passed'] == '?':
2960 next_revision_index = min_revision
2961 elif max_revision_data['passed'] == '?':
2962 next_revision_index = max_revision
2963 elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
2964 previous_revision = revision_list[min_revision]
2965 # If there were changes to any of the external libraries we track,
2966 # should bisect the changes there as well.
2967 external_depot = self._FindNextDepotToBisect(current_depot,
2968 previous_revision, min_revision_data, max_revision_data)
2970 # If there was no change in any of the external depots, the search
2971 # is over.
2972 if not external_depot:
2973 if current_depot == 'v8':
2974 self.warnings.append('Unfortunately, V8 bisection couldn\'t '
2975 'continue any further. The script can only bisect into '
2976 'V8\'s bleeding_edge repository if both the current and '
2977 'previous revisions in trunk map directly to revisions in '
2978 'bleeding_edge.')
2979 break
2981 earliest_revision = max_revision_data['external'][external_depot]
2982 latest_revision = min_revision_data['external'][external_depot]
2984 new_revision_list = self.PrepareToBisectOnDepot(external_depot,
2985 latest_revision,
2986 earliest_revision,
2987 next_revision_depot,
2988 previous_revision)
2990 if not new_revision_list:
2991 results['error'] = 'An error occurred attempting to retrieve'\
2992 ' revision range: [%s..%s]' % \
2993 (earliest_revision, latest_revision)
2994 return results
2996 self.AddRevisionsIntoRevisionData(new_revision_list,
2997 external_depot,
2998 min_revision_data['sort'],
2999 revision_data)
3001 # Reset the bisection and perform it on the newly inserted
3002 # changelists.
3003 revision_list = new_revision_list
3004 min_revision = 0
3005 max_revision = len(revision_list) - 1
3006 sort_key_ids += len(revision_list)
3008 print 'Regression in metric:%s appears to be the result of changes'\
3009 ' in [%s].' % (metric, external_depot)
3011 self.PrintRevisionsToBisectMessage(revision_list, external_depot)
3013 continue
3014 else:
3015 break
3016 else:
3017 next_revision_index = int((max_revision - min_revision) / 2) +\
3018 min_revision
3020 next_revision_id = revision_list[next_revision_index]
3021 next_revision_data = revision_data[next_revision_id]
3022 next_revision_depot = next_revision_data['depot']
3024 self.ChangeToDepotWorkingDirectory(next_revision_depot)
3026 if self.opts.output_buildbot_annotations:
3027 step_name = 'Working on [%s]' % next_revision_id
3028 bisect_utils.OutputAnnotationStepStart(step_name)
3030 print 'Working on revision: [%s]' % next_revision_id
3032 run_results = self.SyncBuildAndRunRevision(next_revision_id,
3033 next_revision_depot,
3034 command_to_run,
3035 metric, skippable=True)
3037 # If the build is successful, check whether or not the metric
3038 # had regressed.
3039 if not run_results[1]:
3040 if len(run_results) > 2:
3041 next_revision_data['external'] = run_results[2]
3042 next_revision_data['perf_time'] = run_results[3]
3043 next_revision_data['build_time'] = run_results[4]
3045 passed_regression = self._CheckIfRunPassed(run_results[0],
3046 known_good_value,
3047 known_bad_value)
3049 next_revision_data['passed'] = passed_regression
3050 next_revision_data['value'] = run_results[0]
3052 if passed_regression:
3053 max_revision = next_revision_index
3054 else:
3055 min_revision = next_revision_index
3056 else:
3057 if run_results[1] == BUILD_RESULT_SKIPPED:
3058 next_revision_data['passed'] = 'Skipped'
3059 elif run_results[1] == BUILD_RESULT_FAIL:
3060 next_revision_data['passed'] = 'Build Failed'
3062 print run_results[0]
3064 # If the build is broken, remove it and redo search.
3065 revision_list.pop(next_revision_index)
3067 max_revision -= 1
3069 if self.opts.output_buildbot_annotations:
3070 self._PrintPartialResults(results)
3071 bisect_utils.OutputAnnotationStepClosed()
3072 else:
3073 # Weren't able to sync and retrieve the revision range.
3074 results['error'] = 'An error occurred attempting to retrieve revision '\
3075 'range: [%s..%s]' % (good_revision, bad_revision)
3077 return results
3079 def _PrintPartialResults(self, results_dict):
3080 revision_data = results_dict['revision_data']
3081 revision_data_sorted = sorted(revision_data.iteritems(),
3082 key = lambda x: x[1]['sort'])
3083 results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
3085 self._PrintTestedCommitsTable(revision_data_sorted,
3086 results_dict['first_working_revision'],
3087 results_dict['last_broken_revision'],
3088 100, final_step=False)
3090 def _PrintConfidence(self, results_dict):
3091 # The perf dashboard specifically looks for the string
3092 # "Confidence in Bisection Results: 100%" to decide whether or not
3093 # to cc the author(s). If you change this, please update the perf
3094 # dashboard as well.
3095 print 'Confidence in Bisection Results: %d%%' % results_dict['confidence']
3097 def _PrintBanner(self, results_dict):
3098 print
3099 print " __o_\___ Aw Snap! We hit a speed bump!"
3100 print "=-O----O-'__.~.___________________________________"
3101 print
3102 if self._IsBisectModeReturnCode():
3103 print ('Bisect reproduced a change in return codes while running the '
3104 'performance test.')
3105 else:
3106 print ('Bisect reproduced a %.02f%% (+-%.02f%%) change in the '
3107 '%s metric.' % (results_dict['regression_size'],
3108 results_dict['regression_std_err'], '/'.join(self.opts.metric)))
3109 self._PrintConfidence(results_dict)
3111 def _PrintFailedBanner(self, results_dict):
3112 print
3113 if self._IsBisectModeReturnCode():
3114 print 'Bisect could not reproduce a change in the return code.'
3115 else:
3116 print ('Bisect could not reproduce a change in the '
3117 '%s metric.' % '/'.join(self.opts.metric))
3118 print
3120 def _GetViewVCLinkFromDepotAndHash(self, cl, depot):
3121 info = self.source_control.QueryRevisionInfo(cl,
3122 self._GetDepotDirectory(depot))
3123 if depot and DEPOT_DEPS_NAME[depot].has_key('viewvc'):
3124 try:
3125 # Format is "git-svn-id: svn://....@123456 <other data>"
3126 svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i]
3127 svn_revision = svn_line[0].split('@')
3128 svn_revision = svn_revision[1].split(' ')[0]
3129 return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision
3130 except IndexError:
3131 return ''
3132 return ''
3134 def _PrintRevisionInfo(self, cl, info, depot=None):
3135 # The perf dashboard specifically looks for the string
3136 # "Author : " to parse out who to cc on a bug. If you change the
3137 # formatting here, please update the perf dashboard as well.
3138 print
3139 print 'Subject : %s' % info['subject']
3140 print 'Author : %s' % info['author']
3141 if not info['email'].startswith(info['author']):
3142 print 'Email : %s' % info['email']
3143 commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
3144 if commit_link:
3145 print 'Link : %s' % commit_link
3146 else:
3147 print
3148 print 'Failed to parse svn revision from body:'
3149 print
3150 print info['body']
3151 print
3152 print 'Commit : %s' % cl
3153 print 'Date : %s' % info['date']
3155 def _PrintTableRow(self, column_widths, row_data):
3156 assert len(column_widths) == len(row_data)
3158 text = ''
3159 for i in xrange(len(column_widths)):
3160 current_row_data = row_data[i].center(column_widths[i], ' ')
3161 text += ('%%%ds' % column_widths[i]) % current_row_data
3162 print text
3164 def _PrintTestedCommitsHeader(self):
3165 if self.opts.bisect_mode == BISECT_MODE_MEAN:
3166 self._PrintTableRow(
3167 [20, 70, 14, 12, 13],
3168 ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State'])
3169 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
3170 self._PrintTableRow(
3171 [20, 70, 14, 12, 13],
3172 ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State'])
3173 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
3174 self._PrintTableRow(
3175 [20, 70, 14, 13],
3176 ['Depot', 'Commit SHA', 'Return Code', 'State'])
3177 else:
3178 assert False, "Invalid bisect_mode specified."
3179 print ' %20s %70s %14s %13s' % ('Depot'.center(20, ' '),
3180 'Commit SHA'.center(70, ' '), 'Return Code'.center(14, ' '),
3181 'State'.center(13, ' '))
3183 def _PrintTestedCommitsEntry(self, current_data, cl_link, state_str):
3184 if self.opts.bisect_mode == BISECT_MODE_MEAN:
3185 std_error = '+-%.02f' % current_data['value']['std_err']
3186 mean = '%.02f' % current_data['value']['mean']
3187 self._PrintTableRow(
3188 [20, 70, 12, 14, 13],
3189 [current_data['depot'], cl_link, mean, std_error, state_str])
3190 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
3191 std_error = '+-%.02f' % current_data['value']['std_err']
3192 mean = '%.02f' % current_data['value']['mean']
3193 self._PrintTableRow(
3194 [20, 70, 12, 14, 13],
3195 [current_data['depot'], cl_link, std_error, mean, state_str])
3196 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
3197 mean = '%d' % current_data['value']['mean']
3198 self._PrintTableRow(
3199 [20, 70, 14, 13],
3200 [current_data['depot'], cl_link, mean, state_str])
3202 def _PrintTestedCommitsTable(self, revision_data_sorted,
3203 first_working_revision, last_broken_revision, confidence,
3204 final_step=True):
3205 print
3206 if final_step:
3207 print 'Tested commits:'
3208 else:
3209 print 'Partial results:'
3210 self._PrintTestedCommitsHeader()
3211 state = 0
3212 for current_id, current_data in revision_data_sorted:
3213 if current_data['value']:
3214 if (current_id == last_broken_revision or
3215 current_id == first_working_revision):
3216 # If confidence is too low, don't add this empty line since it's
3217 # used to put focus on a suspected CL.
3218 if confidence and final_step:
3219 print
3220 state += 1
3221 if state == 2 and not final_step:
3222 # Just want a separation between "bad" and "good" cl's.
3223 print
3225 state_str = 'Bad'
3226 if state == 1 and final_step:
3227 state_str = 'Suspected CL'
3228 elif state == 2:
3229 state_str = 'Good'
3231 # If confidence is too low, don't bother outputting good/bad.
3232 if not confidence:
3233 state_str = ''
3234 state_str = state_str.center(13, ' ')
3236 cl_link = self._GetViewVCLinkFromDepotAndHash(current_id,
3237 current_data['depot'])
3238 if not cl_link:
3239 cl_link = current_id
3240 self._PrintTestedCommitsEntry(current_data, cl_link, state_str)
3242 def _PrintReproSteps(self):
3243 print
3244 print 'To reproduce locally:'
3245 print '$ ' + self.opts.command
3246 if bisect_utils.IsTelemetryCommand(self.opts.command):
3247 print
3248 print 'Also consider passing --profiler=list to see available profilers.'
3250 def _PrintOtherRegressions(self, other_regressions, revision_data):
3251 print
3252 print 'Other regressions may have occurred:'
3253 print ' %8s %70s %10s' % ('Depot'.center(8, ' '),
3254 'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
3255 for regression in other_regressions:
3256 current_id, previous_id, confidence = regression
3257 current_data = revision_data[current_id]
3258 previous_data = revision_data[previous_id]
3260 current_link = self._GetViewVCLinkFromDepotAndHash(current_id,
3261 current_data['depot'])
3262 previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id,
3263 previous_data['depot'])
3265 # If we can't map it to a viewable URL, at least show the original hash.
3266 if not current_link:
3267 current_link = current_id
3268 if not previous_link:
3269 previous_link = previous_id
3271 print ' %8s %70s %s' % (
3272 current_data['depot'], current_link,
3273 ('%d%%' % confidence).center(10, ' '))
3274 print ' %8s %70s' % (
3275 previous_data['depot'], previous_link)
3276 print
3278 def _PrintStepTime(self, revision_data_sorted):
3279 step_perf_time_avg = 0.0
3280 step_build_time_avg = 0.0
3281 step_count = 0.0
3282 for _, current_data in revision_data_sorted:
3283 if current_data['value']:
3284 step_perf_time_avg += current_data['perf_time']
3285 step_build_time_avg += current_data['build_time']
3286 step_count += 1
3287 if step_count:
3288 step_perf_time_avg = step_perf_time_avg / step_count
3289 step_build_time_avg = step_build_time_avg / step_count
3290 print
3291 print 'Average build time : %s' % datetime.timedelta(
3292 seconds=int(step_build_time_avg))
3293 print 'Average test time : %s' % datetime.timedelta(
3294 seconds=int(step_perf_time_avg))
3296 def _PrintWarnings(self):
3297 if not self.warnings:
3298 return
3299 print
3300 print 'WARNINGS:'
3301 for w in set(self.warnings):
3302 print ' !!! %s' % w
3304 def _FindOtherRegressions(self, revision_data_sorted, bad_greater_than_good):
3305 other_regressions = []
3306 previous_values = []
3307 previous_id = None
3308 for current_id, current_data in revision_data_sorted:
3309 current_values = current_data['value']
3310 if current_values:
3311 current_values = current_values['values']
3312 if previous_values:
3313 confidence = CalculateConfidence(previous_values, [current_values])
3314 mean_of_prev_runs = CalculateMean(sum(previous_values, []))
3315 mean_of_current_runs = CalculateMean(current_values)
3317 # Check that the potential regression is in the same direction as
3318 # the overall regression. If the mean of the previous runs < the
3319 # mean of the current runs, this local regression is in same
3320 # direction.
3321 prev_less_than_current = mean_of_prev_runs < mean_of_current_runs
3322 is_same_direction = (prev_less_than_current if
3323 bad_greater_than_good else not prev_less_than_current)
3325 # Only report potential regressions with high confidence.
3326 if is_same_direction and confidence > 50:
3327 other_regressions.append([current_id, previous_id, confidence])
3328 previous_values.append(current_values)
3329 previous_id = current_id
3330 return other_regressions
3333 def _GetResultsDict(self, revision_data, revision_data_sorted):
3334 # Find range where it possibly broke.
3335 first_working_revision = None
3336 first_working_revision_index = -1
3337 last_broken_revision = None
3338 last_broken_revision_index = -1
3340 for i in xrange(len(revision_data_sorted)):
3341 k, v = revision_data_sorted[i]
3342 if v['passed'] == 1:
3343 if not first_working_revision:
3344 first_working_revision = k
3345 first_working_revision_index = i
3347 if not v['passed']:
3348 last_broken_revision = k
3349 last_broken_revision_index = i
3351 if last_broken_revision != None and first_working_revision != None:
3352 broken_means = []
3353 for i in xrange(0, last_broken_revision_index + 1):
3354 if revision_data_sorted[i][1]['value']:
3355 broken_means.append(revision_data_sorted[i][1]['value']['values'])
3357 working_means = []
3358 for i in xrange(first_working_revision_index, len(revision_data_sorted)):
3359 if revision_data_sorted[i][1]['value']:
3360 working_means.append(revision_data_sorted[i][1]['value']['values'])
3362 # Flatten the lists to calculate mean of all values.
3363 working_mean = sum(working_means, [])
3364 broken_mean = sum(broken_means, [])
3366 # Calculate the approximate size of the regression
3367 mean_of_bad_runs = CalculateMean(broken_mean)
3368 mean_of_good_runs = CalculateMean(working_mean)
3370 regression_size = 100 * CalculateRelativeChange(mean_of_good_runs,
3371 mean_of_bad_runs)
3372 if math.isnan(regression_size):
3373 regression_size = 'zero-to-nonzero'
3375 regression_std_err = math.fabs(CalculatePooledStandardError(
3376 [working_mean, broken_mean]) /
3377 max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0
3379 # Give a "confidence" in the bisect. At the moment we use how distinct the
3380 # values are before and after the last broken revision, and how noisy the
3381 # overall graph is.
3382 confidence = CalculateConfidence(working_means, broken_means)
3384 culprit_revisions = []
3386 cwd = os.getcwd()
3387 self.ChangeToDepotWorkingDirectory(
3388 revision_data[last_broken_revision]['depot'])
3390 if revision_data[last_broken_revision]['depot'] == 'cros':
3391 # Want to get a list of all the commits and what depots they belong
3392 # to so that we can grab info about each.
3393 cmd = ['repo', 'forall', '-c',
3394 'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
3395 last_broken_revision, first_working_revision + 1)]
3396 (output, return_code) = RunProcessAndRetrieveOutput(cmd)
3398 changes = []
3399 assert not return_code, 'An error occurred while running'\
3400 ' "%s"' % ' '.join(cmd)
3401 last_depot = None
3402 cwd = os.getcwd()
3403 for l in output.split('\n'):
3404 if l:
3405 # Output will be in form:
3406 # /path_to_depot
3407 # /path_to_other_depot
3408 # <SHA1>
3409 # /path_again
3410 # <SHA1>
3411 # etc.
3412 if l[0] == '/':
3413 last_depot = l
3414 else:
3415 contents = l.split(' ')
3416 if len(contents) > 1:
3417 changes.append([last_depot, contents[0]])
3418 for c in changes:
3419 os.chdir(c[0])
3420 info = self.source_control.QueryRevisionInfo(c[1])
3421 culprit_revisions.append((c[1], info, None))
3422 else:
3423 for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
3424 k, v = revision_data_sorted[i]
3425 if k == first_working_revision:
3426 break
3427 self.ChangeToDepotWorkingDirectory(v['depot'])
3428 info = self.source_control.QueryRevisionInfo(k)
3429 culprit_revisions.append((k, info, v['depot']))
3430 os.chdir(cwd)
3432 # Check for any other possible regression ranges
3433 other_regressions = self._FindOtherRegressions(revision_data_sorted,
3434 mean_of_bad_runs > mean_of_good_runs)
3436 # Check for warnings:
3437 if len(culprit_revisions) > 1:
3438 self.warnings.append('Due to build errors, regression range could '
3439 'not be narrowed down to a single commit.')
3440 if self.opts.repeat_test_count == 1:
3441 self.warnings.append('Tests were only set to run once. This may '
3442 'be insufficient to get meaningful results.')
3443 if confidence < 100:
3444 if confidence:
3445 self.warnings.append(
3446 'Confidence is less than 100%. There could be other candidates for '
3447 'this regression. Try bisecting again with increased repeat_count '
3448 'or on a sub-metric that shows the regression more clearly.')
3449 else:
3450 self.warnings.append(
3451 'Confidence is 0%. Try bisecting again on another platform, with '
3452 'increased repeat_count or on a sub-metric that shows the regression '
3453 'more clearly.')
3455 return {
3456 'first_working_revision': first_working_revision,
3457 'last_broken_revision': last_broken_revision,
3458 'culprit_revisions': culprit_revisions,
3459 'other_regressions': other_regressions,
3460 'regression_size': regression_size,
3461 'regression_std_err': regression_std_err,
3462 'confidence': confidence,
3465 def FormatAndPrintResults(self, bisect_results):
3466 """Prints the results from a bisection run in a readable format.
3468 Args
3469 bisect_results: The results from a bisection test run.
3471 revision_data = bisect_results['revision_data']
3472 revision_data_sorted = sorted(revision_data.iteritems(),
3473 key = lambda x: x[1]['sort'])
3474 results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
3476 if self.opts.output_buildbot_annotations:
3477 bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')
3479 print
3480 print 'Full results of bisection:'
3481 for current_id, current_data in revision_data_sorted:
3482 build_status = current_data['passed']
3484 if type(build_status) is bool:
3485 if build_status:
3486 build_status = 'Good'
3487 else:
3488 build_status = 'Bad'
3490 print ' %20s %40s %s' % (current_data['depot'],
3491 current_id, build_status)
3492 print
3494 if self.opts.output_buildbot_annotations:
3495 bisect_utils.OutputAnnotationStepClosed()
3496 # The perf dashboard scrapes the "results" step in order to comment on
3497 # bugs. If you change this, please update the perf dashboard as well.
3498 bisect_utils.OutputAnnotationStepStart('Results')
3500 if results_dict['culprit_revisions'] and results_dict['confidence']:
3501 self._PrintBanner(results_dict)
3502 for culprit in results_dict['culprit_revisions']:
3503 cl, info, depot = culprit
3504 self._PrintRevisionInfo(cl, info, depot)
3505 self._PrintReproSteps()
3506 if results_dict['other_regressions']:
3507 self._PrintOtherRegressions(results_dict['other_regressions'],
3508 revision_data)
3509 else:
3510 self._PrintFailedBanner(results_dict)
3511 self._PrintReproSteps()
3513 self._PrintTestedCommitsTable(revision_data_sorted,
3514 results_dict['first_working_revision'],
3515 results_dict['last_broken_revision'],
3516 results_dict['confidence'])
3517 self._PrintStepTime(revision_data_sorted)
3518 self._PrintWarnings()
3520 if self.opts.output_buildbot_annotations:
3521 bisect_utils.OutputAnnotationStepClosed()
def DetermineAndCreateSourceControl(opts):
  """Determines the underlying source control workflow in use.

  Args:
    opts: The options parsed from the command line.

  Returns:
    A GitSourceControl instance when running inside a git work tree,
    otherwise None (unsupported workflow).
  """
  output, _ = RunGit(['rev-parse', '--is-inside-work-tree'])
  if output.strip() == 'true':
    return GitSourceControl(opts)
  return None
def IsPlatformSupported(opts):
  """Checks that this platform and build system are supported.

  Args:
    opts: The options parsed from the command line.

  Returns:
    True if the platform and build system are supported.
  """
  # Haven't tested the script out on any other platforms yet.
  return os.name in ('posix', 'nt')
3555 def RmTreeAndMkDir(path_to_dir, skip_makedir=False):
3556 """Removes the directory tree specified, and then creates an empty
3557 directory in the same location (if not specified to skip).
3559 Args:
3560 path_to_dir: Path to the directory tree.
3561 skip_makedir: Whether to skip creating empty directory, default is False.
3563 Returns:
3564 True if successful, False if an error occurred.
3566 try:
3567 if os.path.exists(path_to_dir):
3568 shutil.rmtree(path_to_dir)
3569 except OSError, e:
3570 if e.errno != errno.ENOENT:
3571 return False
3573 if not skip_makedir:
3574 return MaybeMakeDirectory(path_to_dir)
3576 return True
def RemoveBuildFiles(build_type):
  """Removes build files from previous runs.

  Returns:
    True only when both the out/ and build/ trees were cleared successfully.
  """
  # Short-circuit: only attempt build/ when clearing out/ succeeded,
  # matching the original nested-if behavior.
  out_cleared = RmTreeAndMkDir(os.path.join('out', build_type))
  return bool(out_cleared and RmTreeAndMkDir(os.path.join('build', build_type)))
3587 class BisectOptions(object):
3588 """Options to be used when running bisection."""
3589 def __init__(self):
3590 super(BisectOptions, self).__init__()
3592 self.target_platform = 'chromium'
3593 self.build_preference = None
3594 self.good_revision = None
3595 self.bad_revision = None
3596 self.use_goma = None
3597 self.cros_board = None
3598 self.cros_remote_ip = None
3599 self.repeat_test_count = 20
3600 self.truncate_percent = 25
3601 self.max_time_minutes = 20
3602 self.metric = None
3603 self.command = None
3604 self.output_buildbot_annotations = None
3605 self.no_custom_deps = False
3606 self.working_directory = None
3607 self.extra_src = None
3608 self.debug_ignore_build = None
3609 self.debug_ignore_sync = None
3610 self.debug_ignore_perf_test = None
3611 self.gs_bucket = None
3612 self.target_arch = 'ia32'
3613 self.target_build_type = 'Release'
3614 self.builder_host = None
3615 self.builder_port = None
3616 self.bisect_mode = BISECT_MODE_MEAN
  def _CreateCommandLineParser(self):
    """Creates a parser with bisect options.

    Returns:
      An instance of optparse.OptionParser.
    """
    usage = ('%prog [options] [-- chromium-options]\n'
             'Perform binary search on revision history to find a minimal '
             'range of revisions where a peformance metric regressed.\n')

    parser = optparse.OptionParser(usage=usage)

    # Options controlling the bisect range, metric and repetition.
    group = optparse.OptionGroup(parser, 'Bisect options')
    group.add_option('-c', '--command',
                     type='str',
                     help='A command to execute your performance test at' +
                     ' each point in the bisection.')
    group.add_option('-b', '--bad_revision',
                     type='str',
                     help='A bad revision to start bisection. ' +
                     'Must be later than good revision. May be either a git' +
                     ' or svn revision.')
    group.add_option('-g', '--good_revision',
                     type='str',
                     help='A revision to start bisection where performance' +
                     ' test is known to pass. Must be earlier than the ' +
                     'bad revision. May be either a git or svn revision.')
    group.add_option('-m', '--metric',
                     type='str',
                     help='The desired metric to bisect on. For example ' +
                     '"vm_rss_final_b/vm_rss_f_b"')
    group.add_option('-r', '--repeat_test_count',
                     type='int',
                     default=20,
                     help='The number of times to repeat the performance '
                     'test. Values will be clamped to range [1, 100]. '
                     'Default value is 20.')
    group.add_option('--max_time_minutes',
                     type='int',
                     default=20,
                     help='The maximum time (in minutes) to take running the '
                     'performance tests. The script will run the performance '
                     'tests according to --repeat_test_count, so long as it '
                     'doesn\'t exceed --max_time_minutes. Values will be '
                     'clamped to range [1, 60].'
                     'Default value is 20.')
    group.add_option('-t', '--truncate_percent',
                     type='int',
                     default=25,
                     help='The highest/lowest % are discarded to form a '
                     'truncated mean. Values will be clamped to range [0, '
                     '25]. Default value is 25 (highest/lowest 25% will be '
                     'discarded).')
    group.add_option('--bisect_mode',
                     type='choice',
                     choices=[BISECT_MODE_MEAN, BISECT_MODE_STD_DEV,
                         BISECT_MODE_RETURN_CODE],
                     default=BISECT_MODE_MEAN,
                     help='The bisect mode. Choices are to bisect on the '
                     'difference in mean, std_dev, or return_code.')
    parser.add_option_group(group)

    # Options controlling how and where each revision is built.
    group = optparse.OptionGroup(parser, 'Build options')
    group.add_option('-w', '--working_directory',
                     type='str',
                     help='Path to the working directory where the script '
                     'will do an initial checkout of the chromium depot. The '
                     'files will be placed in a subdirectory "bisect" under '
                     'working_directory and that will be used to perform the '
                     'bisection. This parameter is optional, if it is not '
                     'supplied, the script will work from the current depot.')
    group.add_option('--build_preference',
                     type='choice',
                     choices=['msvs', 'ninja', 'make'],
                     help='The preferred build system to use. On linux/mac '
                     'the options are make/ninja. On Windows, the options '
                     'are msvs/ninja.')
    group.add_option('--target_platform',
                     type='choice',
                     choices=['chromium', 'cros', 'android', 'android-chrome'],
                     default='chromium',
                     help='The target platform. Choices are "chromium" '
                     '(current platform), "cros", or "android". If you '
                     'specify something other than "chromium", you must be '
                     'properly set up to build that platform.')
    group.add_option('--no_custom_deps',
                     dest='no_custom_deps',
                     action="store_true",
                     default=False,
                     help='Run the script with custom_deps or not.')
    group.add_option('--extra_src',
                     type='str',
                     help='Path to a script which can be used to modify '
                     'the bisect script\'s behavior.')
    group.add_option('--cros_board',
                     type='str',
                     help='The cros board type to build.')
    group.add_option('--cros_remote_ip',
                     type='str',
                     help='The remote machine to image to.')
    group.add_option('--use_goma',
                     action="store_true",
                     help='Add a bunch of extra threads for goma.')
    group.add_option('--output_buildbot_annotations',
                     action="store_true",
                     help='Add extra annotation output for buildbot.')
    group.add_option('--gs_bucket',
                     default='',
                     dest='gs_bucket',
                     type='str',
                     help=('Name of Google Storage bucket to upload or '
                     'download build. e.g., chrome-perf'))
    group.add_option('--target_arch',
                     type='choice',
                     choices=['ia32', 'x64', 'arm'],
                     default='ia32',
                     dest='target_arch',
                     help=('The target build architecture. Choices are "ia32" '
                     '(default), "x64" or "arm".'))
    group.add_option('--target_build_type',
                     type='choice',
                     choices=['Release', 'Debug'],
                     default='Release',
                     help='The target build type. Choices are "Release" '
                     '(default), or "Debug".')
    group.add_option('--builder_host',
                     dest='builder_host',
                     type='str',
                     help=('Host address of server to produce build by posting'
                           ' try job request.'))
    group.add_option('--builder_port',
                     dest='builder_port',
                     type='int',
                     help=('HTTP port of the server to produce build by posting'
                           ' try job request.'))
    parser.add_option_group(group)

    # Debug-only switches that skip expensive stages.
    group = optparse.OptionGroup(parser, 'Debug options')
    group.add_option('--debug_ignore_build',
                     action="store_true",
                     help='DEBUG: Don\'t perform builds.')
    group.add_option('--debug_ignore_sync',
                     action="store_true",
                     help='DEBUG: Don\'t perform syncs.')
    group.add_option('--debug_ignore_perf_test',
                     action="store_true",
                     help='DEBUG: Don\'t perform performance tests.')
    parser.add_option_group(group)
    return parser
3768 def ParseCommandLine(self):
3769 """Parses the command line for bisect options."""
3770 parser = self._CreateCommandLineParser()
3771 (opts, _) = parser.parse_args()
3773 try:
3774 if not opts.command:
3775 raise RuntimeError('missing required parameter: --command')
3777 if not opts.good_revision:
3778 raise RuntimeError('missing required parameter: --good_revision')
3780 if not opts.bad_revision:
3781 raise RuntimeError('missing required parameter: --bad_revision')
3783 if not opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE:
3784 raise RuntimeError('missing required parameter: --metric')
3786 if opts.gs_bucket:
3787 if not cloud_storage.List(opts.gs_bucket):
3788 raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket)
3789 if not opts.builder_host:
3790 raise RuntimeError('Must specify try server hostname, when '
3791 'gs_bucket is used: --builder_host')
3792 if not opts.builder_port:
3793 raise RuntimeError('Must specify try server port number, when '
3794 'gs_bucket is used: --builder_port')
3795 if opts.target_platform == 'cros':
3796 # Run sudo up front to make sure credentials are cached for later.
3797 print 'Sudo is required to build cros:'
3798 print
3799 RunProcess(['sudo', 'true'])
3801 if not opts.cros_board:
3802 raise RuntimeError('missing required parameter: --cros_board')
3804 if not opts.cros_remote_ip:
3805 raise RuntimeError('missing required parameter: --cros_remote_ip')
3807 if not opts.working_directory:
3808 raise RuntimeError('missing required parameter: --working_directory')
3810 metric_values = opts.metric.split('/')
3811 if (len(metric_values) != 2 and
3812 opts.bisect_mode != BISECT_MODE_RETURN_CODE):
3813 raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)
3815 opts.metric = metric_values
3816 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3817 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3818 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3819 opts.truncate_percent = opts.truncate_percent / 100.0
3821 for k, v in opts.__dict__.iteritems():
3822 assert hasattr(self, k), "Invalid %s attribute in BisectOptions." % k
3823 setattr(self, k, v)
3824 except RuntimeError, e:
3825 output_string = StringIO.StringIO()
3826 parser.print_help(file=output_string)
3827 error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
3828 output_string.close()
3829 raise RuntimeError(error_message)
3831 @staticmethod
3832 def FromDict(values):
3833 """Creates an instance of BisectOptions with the values parsed from a
3834 .cfg file.
3836 Args:
3837 values: a dict containing options to set.
3839 Returns:
3840 An instance of BisectOptions.
3842 opts = BisectOptions()
3843 for k, v in values.iteritems():
3844 assert hasattr(opts, k), 'Invalid %s attribute in '\
3845 'BisectOptions.' % k
3846 setattr(opts, k, v)
3848 metric_values = opts.metric.split('/')
3849 if len(metric_values) != 2:
3850 raise RuntimeError("Invalid metric specified: [%s]" % opts.metric)
3852 opts.metric = metric_values
3853 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3854 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3855 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3856 opts.truncate_percent = opts.truncate_percent / 100.0
3858 return opts
def main():
  """Entry point: runs a bisection and returns 0 on success, 1 on error."""
  try:
    opts = BisectOptions()
    opts.ParseCommandLine()

    # An --extra_src script may register additional depots to bisect into.
    if opts.extra_src:
      extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
      if not extra_src:
        raise RuntimeError("Invalid or missing --extra_src.")
      _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())

    if opts.working_directory:
      custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
      if opts.no_custom_deps:
        custom_deps = None
      bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)

      os.chdir(os.path.join(os.getcwd(), 'src'))

    # Stale build output could skew the perf results between revisions.
    if not RemoveBuildFiles(opts.target_build_type):
      raise RuntimeError('Something went wrong removing the build files.')

    if not IsPlatformSupported(opts):
      raise RuntimeError("Sorry, this platform isn't supported yet.")

    # Check what source control method they're using. Only support git workflow
    # at the moment.
    source_control = DetermineAndCreateSourceControl(opts)

    if not source_control:
      raise RuntimeError("Sorry, only the git workflow is supported at the "
                         "moment.")

    # gClient sync seems to fail if you're not in master branch.
    if (not source_control.IsInProperBranch() and
        not opts.debug_ignore_sync and
        not opts.working_directory):
      raise RuntimeError("You must switch to master branch to run bisection.")
    bisect_test = BisectPerformanceMetrics(source_control, opts)
    try:
      bisect_results = bisect_test.Run(opts.command,
                                       opts.bad_revision,
                                       opts.good_revision,
                                       opts.metric)
      if bisect_results['error']:
        raise RuntimeError(bisect_results['error'])
      bisect_test.FormatAndPrintResults(bisect_results)
      return 0
    finally:
      # Always clean up, even when the bisect itself failed.
      bisect_test.PerformCleanup()
  except RuntimeError, e:
    if opts.output_buildbot_annotations:
      # The perf dashboard scrapes the "results" step in order to comment on
      # bugs. If you change this, please update the perf dashboard as well.
      bisect_utils.OutputAnnotationStepStart('Results')
    print 'Error: %s' % e.message
    if opts.output_buildbot_annotations:
      bisect_utils.OutputAnnotationStepClosed()
    return 1
if __name__ == '__main__':
  # Propagate main()'s result (0 on success, 1 on error) as the exit code.
  sys.exit(main())