2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
13 further bisect changes to those depots and attempt to narrow down the revision
range where the regression may have occurred.
17 An example usage (using svn cl's):
19 ./tools/bisect-perf-regression.py -c\
20 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
21 -g 168222 -b 168232 -m shutdown/simple-user-quit
23 Be aware that if you're using the git workflow and specify an svn revision,
24 the script will attempt to find the git SHA1 where svn changes up to that
25 revision were merged in.
28 An example usage (using git hashes):
30 ./tools/bisect-perf-regression.py -c\
31 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
32 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
33 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
34 -m shutdown/simple-user-quit
54 sys
.path
.append(os
.path
.join(os
.path
.dirname(__file__
), 'telemetry'))
56 from auto_bisect
import bisect_utils
57 from auto_bisect
import post_perf_builder_job
as bisect_builder
58 from telemetry
.util
import cloud_storage
60 # The additional repositories that might need to be bisected.
61 # If the repository has any dependent repositories (such as skia/src needs
62 # skia/include and skia/gyp to be updated), specify them in the 'depends'
63 # so that they're synced appropriately.
65 # src: path to the working directory.
66 # recurse: True if this repository will get bisected.
67 # depends: A list of other repositories that are actually part of the same
69 # svn: Needed for git workflow to resolve hashes to svn revisions.
70 # from: Parent depot that must be bisected before this is bisected.
71 # deps_var: Key name in the vars variable in the DEPS file that has revision
information.
77 "from" : ['cros', 'android-chrome'],
78 'viewvc': 'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
79 'deps_var': 'chromium_rev'
82 "src" : "src/third_party/WebKit",
85 "from" : ['chromium'],
86 'viewvc': 'http://src.chromium.org/viewvc/blink?view=revision&revision=',
87 'deps_var': 'webkit_revision'
90 "src" : "src/third_party/angle",
91 "src_old" : "src/third_party/angle_dx11",
94 "from" : ['chromium'],
96 'deps_var': 'angle_revision'
102 "from" : ['chromium'],
103 "custom_deps": bisect_utils
.GCLIENT_CUSTOM_DEPS_V8
,
104 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
105 'deps_var': 'v8_revision'
107 'v8_bleeding_edge' : {
108 "src" : "src/v8_bleeding_edge",
111 "svn": "https://v8.googlecode.com/svn/branches/bleeding_edge",
113 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
114 'deps_var': 'v8_revision'
117 "src" : "src/third_party/skia/src",
119 "svn" : "http://skia.googlecode.com/svn/trunk/src",
120 "depends" : ['skia/include', 'skia/gyp'],
121 "from" : ['chromium'],
122 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
123 'deps_var': 'skia_revision'
126 "src" : "src/third_party/skia/include",
128 "svn" : "http://skia.googlecode.com/svn/trunk/include",
130 "from" : ['chromium'],
131 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
135 "src" : "src/third_party/skia/gyp",
137 "svn" : "http://skia.googlecode.com/svn/trunk/gyp",
139 "from" : ['chromium'],
140 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
145 DEPOT_NAMES
= DEPOT_DEPS_NAME
.keys()
146 CROS_SDK_PATH
= os
.path
.join('..', 'cros', 'chromite', 'bin', 'cros_sdk')
147 CROS_VERSION_PATTERN
= 'new version number from %s'
148 CROS_CHROMEOS_PATTERN
= 'chromeos-base/chromeos-chrome'
149 CROS_TEST_KEY_PATH
= os
.path
.join('..', 'cros', 'chromite', 'ssh_keys',
151 CROS_SCRIPT_KEY_PATH
= os
.path
.join('..', 'cros', 'src', 'scripts',
152 'mod_for_test_scripts', 'ssh_keys',
155 BUILD_RESULT_SUCCEED
= 0
156 BUILD_RESULT_FAIL
= 1
157 BUILD_RESULT_SKIPPED
= 2
159 # Maximum time in seconds to wait after posting build request to tryserver.
160 # TODO: Change these values based on the actual time taken by buildbots on
162 MAX_MAC_BUILD_TIME
= 14400
163 MAX_WIN_BUILD_TIME
= 14400
164 MAX_LINUX_BUILD_TIME
= 14400
166 # Patch template to add a new file, DEPS.sha under src folder.
167 # This file contains SHA1 value of the DEPS changes made while bisecting
168 # dependency repositories. This patch send along with DEPS patch to tryserver.
169 # When a build requested is posted with a patch, bisect builders on tryserver,
170 # once build is produced, it reads SHA value from this file and appends it
171 # to build archive filename.
172 DEPS_SHA_PATCH
= """diff --git src/DEPS.sha src/DEPS.sha
180 # The possible values of the --bisect_mode flag, which determines what to
181 # use when classifying a revision as "good" or "bad".
182 BISECT_MODE_MEAN
= 'mean'
183 BISECT_MODE_STD_DEV
= 'std_dev'
184 BISECT_MODE_RETURN_CODE
= 'return_code'
186 # The perf dashboard specifically looks for the string
187 # "Estimated Confidence: 95%" to decide whether or not
188 # to cc the author(s). If you change this, please update the perf
191 ===== BISECT JOB RESULTS =====
194 Test Command: %(command)s
195 Test Metric: %(metrics)s
196 Relative Change: %(change)s
197 Estimated Confidence: %(confidence)d%%"""
199 # The perf dashboard specifically looks for the string
200 # "Author : " to parse out who to cc on a bug. If you change the
201 # formatting here, please update the perf dashboard as well.
202 RESULTS_REVISION_INFO
= """
203 ===== SUSPECTED CL(s) =====
204 Subject : %(subject)s
205 Author : %(author)s%(email_info)s%(commit_info)s
206 Date : %(cl_date)s"""
208 REPRO_STEPS_LOCAL
= """
209 ==== INSTRUCTIONS TO REPRODUCE ====
213 REPRO_STEPS_TRYJOB
= """
214 To reproduce on Performance trybot:
215 1. Create new git branch or check out existing branch.
216 2. Edit tools/run-perf-test.cfg (instructions in file) or \
217 third_party/WebKit/Tools/run-perf-test.cfg.
218 a) Take care to strip any src/ directories from the head of \
220 b) On desktop, only --browser=release is supported, on android \
221 --browser=android-chromium-testshell.
222 c) Test command to use: %(command)s
223 3. Upload your patch. --bypass-hooks is necessary to upload the changes you \
224 committed locally to run-perf-test.cfg.
225 Note: *DO NOT* commit run-perf-test.cfg changes to the project repository.
226 $ git cl upload --bypass-hooks
227 4. Send your try job to the tryserver. \
228 [Please make sure to use appropriate bot to reproduce]
229 $ git cl try -m tryserver.chromium.perf -b <bot>
231 For more details please visit \nhttps://sites.google.com/a/chromium.org/dev/\
232 developers/performance-try-bots"""
234 RESULTS_THANKYOU
= """
235 ===== THANK YOU FOR CHOOSING BISECT AIRLINES =====
236 Visit http://www.chromium.org/developers/core-principles for Chrome's policy
238 Contact chrome-perf-dashboard-team with any questions or suggestions about
243 . | ---------'-------'-----------.
244 . . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 `-.
245 . \______________.-------._______________)
def _AddAdditionalDepotInfo(depot_info):
  """Adds additional depot info to the global depot variables.

  Args:
    depot_info: A dict of depot-name -> config-dict entries, in the same
        format as DEPOT_DEPS_NAME.
  """
  global DEPOT_DEPS_NAME
  global DEPOT_NAMES
  # Build a new merged dict (rather than mutating in place) so any external
  # references to the previous mapping are left untouched; entries from
  # |depot_info| win on key collisions, matching dict(a.items() + b.items()).
  merged_deps = dict(DEPOT_DEPS_NAME)
  merged_deps.update(depot_info)
  DEPOT_DEPS_NAME = merged_deps
  DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
def CalculateTruncatedMean(data_set, truncate_percent):
  """Calculates the truncated mean of a set of values.

  Note that this isn't just the mean of the set of values with the highest
  and lowest values discarded; the non-discarded values are also weighted
  differently depending how many values are discarded.

  Args:
    data_set: Non-empty list of values.
    truncate_percent: The % from the upper and lower portions of the data set
        to discard, expressed as a value in [0, 1].

  Returns:
    The truncated mean as a float.

  Raises:
    TypeError: The data set was empty after discarding values.
  """
  if len(data_set) > 2:
    data_set = sorted(data_set)

    discard_num_float = len(data_set) * truncate_percent
    discard_num_int = int(math.floor(discard_num_float))
    kept_weight = len(data_set) - discard_num_float * 2

    data_set = data_set[discard_num_int:len(data_set) - discard_num_int]

    weight_left = 1.0 - (discard_num_float - discard_num_int)

    if weight_left < 1:
      # If the % to discard leaves a fractional portion, need to weight those
      # remaining boundary values by the leftover fraction.
      unweighted_vals = data_set[1:len(data_set) - 1]
      weighted_vals = [data_set[0], data_set[len(data_set) - 1]]
      weighted_vals = [w * weight_left for w in weighted_vals]
      data_set = weighted_vals + unweighted_vals
  else:
    kept_weight = len(data_set)

  if not data_set:
    # An over-aggressive truncate_percent can discard every value; surface
    # that as the documented TypeError rather than a ZeroDivisionError.
    raise TypeError('Data set empty after discarding values.')

  # sum(float(...)) guarantees the documented float return even when the
  # input is a single int (reduce() would have returned it unconverted).
  truncated_mean = sum(float(x) for x in data_set) / kept_weight

  return truncated_mean
def CalculateMean(values):
  """Calculates the arithmetic mean of a list of values."""
  # A truncated mean with 0% trimmed from each end is just the plain mean.
  return CalculateTruncatedMean(values, truncate_percent=0.0)
def CalculateConfidence(good_results_lists, bad_results_lists):
  """Calculates a confidence percentage.

  This is calculated based on how distinct the "good" and "bad" values are,
  and how noisy the results are. More precisely, the confidence is the quotient
  of the difference between the closest values across the good and bad groups
  and the sum of the standard deviations of the good and bad groups.

  TODO(qyearsley): Replace this confidence function with a function that
  uses a Student's t-test. The confidence would be (1 - p-value), where
  p-value is the probability of obtaining the given set of good and bad
  values just by chance.

  Args:
    good_results_lists: A list of lists of "good" result numbers.
    bad_results_lists: A list of lists of "bad" result numbers.

  Returns:
    A number in the range [0, 100].
  """
  # Get the distance between the two groups. Materialize the maps so the
  # means can be consumed by both min() and max().
  means_good = list(map(CalculateMean, good_results_lists))
  means_bad = list(map(CalculateMean, bad_results_lists))
  bounds_good = (min(means_good), max(means_good))
  bounds_bad = (min(means_bad), max(means_bad))
  dist_between_groups = min(
      math.fabs(bounds_bad[1] - bounds_good[0]),
      math.fabs(bounds_bad[0] - bounds_good[1]))

  # Get the sum of the standard deviations of the two groups.
  good_results_flattened = sum(good_results_lists, [])
  bad_results_flattened = sum(bad_results_lists, [])
  stddev_good = CalculateStandardDeviation(good_results_flattened)
  stddev_bad = CalculateStandardDeviation(bad_results_flattened)
  stddev_sum = stddev_good + stddev_bad

  # The 0.0001 floor avoids dividing by zero when both groups are constant.
  confidence = dist_between_groups / (max(0.0001, stddev_sum))
  confidence = int(min(1.0, max(confidence, 0.0)) * 100.0)
  return confidence
def CalculateStandardDeviation(values):
  """Calculates the sample standard deviation of the given list of values."""
  # A single sample has no spread; without this guard the (n - 1) Bessel
  # denominator below would be zero.
  if len(values) == 1:
    return 0.0

  mean = CalculateMean(values)
  differences_from_mean = [float(x) - mean for x in values]
  squared_differences = [float(x * x) for x in differences_from_mean]
  # Sample (not population) variance: divide by n - 1.
  variance = sum(squared_differences) / (len(values) - 1)
  std_dev = math.sqrt(variance)

  return std_dev
def CalculateRelativeChange(before, after):
  """Returns the relative change of before and after, relative to before.

  There are several different ways to define relative difference between
  two numbers; sometimes it is defined as relative to the smaller number,
  or to the mean of the two numbers. This version returns the difference
  relative to the first of the two numbers.

  Args:
    before: A number representing an earlier value.
    after: Another number, representing a later value.

  Returns:
    A non-negative floating point number; 0.1 represents a 10% change.
  """
  if before == after:
    return 0.0
  if before == 0:
    # Undefined relative change from a zero baseline; signal with NaN
    # rather than raising ZeroDivisionError.
    return float('nan')
  difference = after - before
  return math.fabs(difference / before)
def CalculatePooledStandardError(work_sets):
  """Calculates the pooled sample standard error for a set of samples.

  Args:
    work_sets: A collection of collections of numbers.

  Returns:
    Pooled sample standard error as a float (0.0 if there is not enough
    data to compute one).
  """
  numerator = 0.0
  denominator1 = 0.0
  denominator2 = 0.0

  for current_set in work_sets:
    std_dev = CalculateStandardDeviation(current_set)
    numerator += (len(current_set) - 1) * std_dev ** 2
    denominator1 += len(current_set) - 1
    denominator2 += 1.0 / len(current_set)

  # All sets of size <= 1 leave denominator1 at zero; return 0.0 instead of
  # dividing by zero.
  if denominator1:
    return math.sqrt(numerator / denominator1) * math.sqrt(denominator2)
  return 0.0
def CalculateStandardError(values):
  """Calculates the standard error of a list of values."""
  # With zero or one samples there is no meaningful spread to report.
  if len(values) <= 1:
    return 0.0

  std_dev = CalculateStandardDeviation(values)

  return std_dev / math.sqrt(len(values))
def IsStringFloat(string_to_check):
  """Checks whether or not the given string can be converted to a floating
  point number.

  Args:
    string_to_check: Input string to check if it can be converted to a float.

  Returns:
    True if the string can be converted to a float.
  """
  try:
    float(string_to_check)
    return True
  except ValueError:
    return False
def IsStringInt(string_to_check):
  """Checks whether or not the given string can be converted to a integer.

  Args:
    string_to_check: Input string to check if it can be converted to an int.

  Returns:
    True if the string can be converted to an int.
  """
  try:
    int(string_to_check)
    return True
  except ValueError:
    return False
def IsWindowsHost():
  """Checks whether or not the script is running on Windows.

  Returns:
    True if running on Windows.
  """
  # Cygwin reports its own platform string, so check for it explicitly.
  return sys.platform == 'cygwin' or sys.platform.startswith('win')
def Is64BitWindows():
  """Returns whether or not Windows is a 64-bit version.

  Returns:
    True if Windows is 64-bit, False if 32-bit (or if the architecture
    environment variables are absent, e.g. on non-Windows hosts).
  """
  # When running 32-bit Python in WoW64 on 64-bit Windows,
  # PROCESSOR_ARCHITECTURE lies; PROCESSOR_ARCHITEW6432 holds the real value.
  platform = os.environ.get('PROCESSOR_ARCHITEW6432')
  if not platform:
    # Must not be running in WoW64, so PROCESSOR_ARCHITECTURE is correct.
    # .get() avoids a KeyError when the variable isn't set at all.
    platform = os.environ.get('PROCESSOR_ARCHITECTURE', '')

  return platform in ['AMD64', 'I64']
def IsLinuxHost():
  """Checks whether or not the script is running on Linux.

  Returns:
    True if running on Linux.
  """
  return sys.platform.startswith('linux')
def IsMacHost():
  """Checks whether or not the script is running on Mac.

  Returns:
    True if running on Mac.
  """
  return sys.platform.startswith('darwin')
def GetSHA1HexDigest(contents):
  """Returns secured hash containing hexadecimal for the given contents."""
  digest = hashlib.sha1(contents)
  return digest.hexdigest()
def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None):
  """Gets the archive file name for the given revision."""
  def PlatformName():
    """Return a string to be used in paths for the platform."""
    if IsWindowsHost():
      # Build archive for x64 is still stored with 'win32'suffix
      # (chromium_utils.PlatformName()).
      if Is64BitWindows() and target_arch == 'x64':
        return 'win32'
      return 'win32'
    if IsLinuxHost():
      # Android builds too are archived with full-build-linux* prefix.
      return 'linux'
    if IsMacHost():
      return 'mac'
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)

  base_name = 'full-build-%s' % PlatformName()
  if not build_revision:
    return '%s.zip' % base_name
  if patch_sha:
    # Patched builds are archived with the DEPS-patch SHA appended.
    build_revision = '%s_%s' % (build_revision, patch_sha)
  return '%s_%s.zip' % (base_name, build_revision)
def GetRemoteBuildPath(build_revision, target_platform='chromium',
                       target_arch='ia32', patch_sha=None):
  """Compute the url to download the build from."""
  def GetGSRootFolderName(target_platform):
    """Gets Google Cloud Storage root folder names"""
    if IsWindowsHost():
      if Is64BitWindows() and target_arch == 'x64':
        return 'Win x64 Builder'
      return 'Win Builder'
    if IsLinuxHost():
      if target_platform == 'android':
        return 'android_perf_rel'
      return 'Linux Builder'
    if IsMacHost():
      return 'Mac Builder'
    raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

  base_filename = GetZipFileName(
      build_revision, target_arch, patch_sha)
  builder_folder = GetGSRootFolderName(target_platform)
  return '%s/%s' % (builder_folder, base_filename)
def FetchFromCloudStorage(bucket_name, source_path, destination_path):
  """Fetches file(s) from the Google Cloud Storage.

  Args:
    bucket_name: Google Storage bucket name.
    source_path: Source file path.
    destination_path: Destination file path.

  Returns:
    Downloaded file path if it exists, otherwise None.
  """
  target_file = os.path.join(destination_path, os.path.basename(source_path))
  try:
    if cloud_storage.Exists(bucket_name, source_path):
      print('Fetching file from gs//%s/%s ...' % (bucket_name, source_path))
      cloud_storage.Get(bucket_name, source_path, destination_path)
      if os.path.exists(target_file):
        return target_file
    else:
      print('File gs://%s/%s not found in cloud storage.' % (
          bucket_name, source_path))
  # NOTE(review): broad catch is deliberate best-effort behavior — a failed
  # fetch should not abort the bisect run.
  except Exception as e:
    print('Something went wrong while fetching file from cloud: %s' % e)
    # Remove any partially-downloaded file before giving up.
    if os.path.exists(target_file):
      os.remove(target_file)
  return None
574 # This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def MaybeMakeDirectory(*path):
  """Creates an entire path, if it doesn't already exist."""
  file_path = os.path.join(*path)
  try:
    os.makedirs(file_path)
  except OSError as e:
    # An already-existing path is fine; any other OS error is a failure.
    if e.errno != errno.EEXIST:
      return False
  return True
586 # This is copied from Chromium's project build/scripts/common/chromium_utils.py.
def ExtractZip(filename, output_dir, verbose=True):
  """ Extract the zip archive in the output directory."""
  MaybeMakeDirectory(output_dir)

  # On Linux and Mac, we use the unzip command as it will
  # handle links and file bits (executable), which is much
  # easier then trying to do that with ZipInfo options.
  #
  # The Mac Version of unzip unfortunately does not support Zip64, whereas
  # the python module does, so we have to fallback to the python zip module
  # on Mac if the filesize is greater than 4GB.
  #
  # On Windows, try to use 7z if it is installed, otherwise fall back to python
  # zip module and pray we don't have files larger than 512MB to unzip.
  unzip_cmd = None
  if ((IsMacHost() and os.path.getsize(filename) < 4 * 1024 * 1024 * 1024)
      or IsLinuxHost()):
    unzip_cmd = ['unzip', '-o']
  elif IsWindowsHost() and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe'):
    unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']

  if unzip_cmd:
    # Make sure path is absolute before changing directories.
    filepath = os.path.abspath(filename)
    saved_dir = os.getcwd()
    os.chdir(output_dir)
    command = unzip_cmd + [filepath]
    result = RunProcess(command)
    os.chdir(saved_dir)
    if result:
      raise IOError('unzip failed: %s => %s' % (str(command), result))
  else:
    assert IsWindowsHost() or IsMacHost()
    zf = zipfile.ZipFile(filename)
    for name in zf.namelist():
      if verbose:
        print('Extracting %s' % name)
      zf.extract(name, output_dir)
      if IsMacHost():
        # Restore permission bits: the unix mode lives in the upper 16 bits
        # of external_attr. (Plain int literal; the old 16L long suffix is
        # unnecessary even on Python 2.)
        os.chmod(os.path.join(output_dir, name),
                 zf.getinfo(name).external_attr >> 16)
def RunProcess(command):
  """Runs an arbitrary command.

  If output from the call is needed, use RunProcessAndRetrieveOutput instead.

  Args:
    command: A list containing the command and args to execute.

  Returns:
    The return code of the call.
  """
  # On Windows, shell=True is needed to get PATH interpretation.
  return subprocess.call(command, shell=IsWindowsHost())
def RunProcessAndRetrieveOutput(command, cwd=None):
  """Runs an arbitrary command, returning its output and return code.

  Since output is collected via communicate(), there will be no output until
  the call terminates. If you need output while the program runs (ie. so
  that the buildbot doesn't terminate the script), consider RunProcess().

  Args:
    command: A list containing the command and args to execute.
    cwd: A directory to change to while running the command. The command can be
        relative to this directory. If this is None, the command will be run in
        the current directory.

  Returns:
    A tuple of the output and return code.
  """
  original_cwd = os.getcwd()
  if cwd:
    os.chdir(cwd)
  try:
    # On Windows, use shell=True to get PATH interpretation.
    shell = IsWindowsHost()
    proc = subprocess.Popen(command, shell=shell, stdout=subprocess.PIPE)
    (output, _) = proc.communicate()
  finally:
    # Restore the working directory even if Popen/communicate raises,
    # so a failed command doesn't leave the process in the wrong directory.
    os.chdir(original_cwd)

  return (output, proc.returncode)
def RunGit(command, cwd=None):
  """Run a git subcommand, returning its output and return code.

  Args:
    command: A list containing the args to git.
    cwd: A directory to change to while running the git command (optional).

  Returns:
    A tuple of the output and return code.
  """
  full_command = ['git'] + command
  return RunProcessAndRetrieveOutput(full_command, cwd=cwd)
def CheckRunGit(command, cwd=None):
  """Run a git subcommand, returning its output and return code. Asserts if
  the return code of the call is non-zero.

  Args:
    command: A list containing the args to git.
    cwd: A directory to change to while running the git command (optional).

  Returns:
    A tuple of the output and return code.
  """
  (output, return_code) = RunGit(command, cwd=cwd)

  assert not return_code, 'An error occurred while running'\
                          ' "git %s"' % ' '.join(command)
  return output
def SetBuildSystemDefault(build_system, use_goma, goma_dir):
  """Sets up any environment variables needed to build with the specified build
  system.

  Args:
    build_system: A string specifying build system. Currently only 'ninja' or
        'make' are supported.
    use_goma: Whether goma should be enabled in GYP_DEFINES.
    goma_dir: Optional path to the goma directory.
  """
  if build_system == 'ninja':
    gyp_var = os.getenv('GYP_GENERATORS')

    if not gyp_var or not 'ninja' in gyp_var:
      if gyp_var:
        os.environ['GYP_GENERATORS'] = gyp_var + ',ninja'
      else:
        os.environ['GYP_GENERATORS'] = 'ninja'

      if IsWindowsHost():
        os.environ['GYP_DEFINES'] = 'component=shared_library '\
            'incremental_chrome_dll=1 disable_nacl=1 fastbuild=1 '\
            'chromium_win_pch=0'
  elif build_system == 'make':
    os.environ['GYP_GENERATORS'] = 'make'
  else:
    raise RuntimeError('%s build not supported.' % build_system)

  if use_goma:
    os.environ['GYP_DEFINES'] = '%s %s' % (os.getenv('GYP_DEFINES', ''),
                                           'use_goma=1')
    if goma_dir:
      os.environ['GYP_DEFINES'] += ' gomadir=%s' % goma_dir
def BuildWithMake(threads, targets, build_type='Release'):
  """Runs a make build for the given targets.

  Args:
    threads: Number of parallel jobs (-j); falsy to use make's default.
    targets: List of make targets to build.
    build_type: 'Release' or 'Debug'.

  Returns:
    True if the build succeeded (make exited with 0).
  """
  cmd = ['make', 'BUILDTYPE=%s' % build_type]

  if threads:
    cmd.append('-j%d' % threads)

  cmd += targets

  return_code = RunProcess(cmd)

  return not return_code
def BuildWithNinja(threads, targets, build_type='Release'):
  """Runs a ninja build for the given targets.

  Args:
    threads: Number of parallel jobs (-j); falsy to use ninja's default.
    targets: List of ninja targets to build.
    build_type: 'Release' or 'Debug'; selects the out/ subdirectory.

  Returns:
    True if the build succeeded (ninja exited with 0).
  """
  cmd = ['ninja', '-C', os.path.join('out', build_type)]

  if threads:
    cmd.append('-j%d' % threads)

  cmd += targets

  return_code = RunProcess(cmd)

  return not return_code
def BuildWithVisualStudio(targets, build_type='Release'):
  """Runs a Visual Studio (devenv) build for the given project targets.

  Args:
    targets: List of project names to build within chrome.sln.
    build_type: 'Release' or 'Debug'.

  Returns:
    True if the build succeeded (devenv exited with 0).
  """
  # VS100COMNTOOLS points inside the VS install; devenv.com lives in ..\IDE.
  path_to_devenv = os.path.abspath(
      os.path.join(os.environ['VS100COMNTOOLS'], '..', 'IDE', 'devenv.com'))
  path_to_sln = os.path.join(os.getcwd(), 'chrome', 'chrome.sln')
  cmd = [path_to_devenv, '/build', build_type, path_to_sln]

  for t in targets:
    cmd.extend(['/Project', t])

  return_code = RunProcess(cmd)

  return not return_code
def WriteStringToFile(text, file_name):
  """Writes text to the named file, raising RuntimeError on failure."""
  try:
    with open(file_name, "wb") as f:
      f.write(text)
  except IOError:
    raise RuntimeError('Error writing to file [%s]' % file_name)
def ReadStringFromFile(file_name):
  """Reads and returns the contents of the named file.

  Raises:
    RuntimeError: The file could not be opened or read.
  """
  try:
    with open(file_name) as f:
      return f.read()
  except IOError:
    raise RuntimeError('Error reading file [%s]' % file_name)
def ChangeBackslashToSlashInPatch(diff_text):
  """Formats file paths in the given text to unix-style paths."""
  if not diff_text:
    return None
  diff_lines = diff_text.split('\n')
  for i in range(len(diff_lines)):
    # Only the '---'/'+++' file-header lines carry paths; leave hunk
    # content untouched so literal backslashes in the diff body survive.
    if (diff_lines[i].startswith('--- ') or
        diff_lines[i].startswith('+++ ')):
      diff_lines[i] = diff_lines[i].replace('\\', '/')
  return '\n'.join(diff_lines)
class Builder(object):
  """Builder is used by the bisect script to build relevant targets and deploy.
  """
  def __init__(self, opts):
    """Performs setup for building with target build system.

    Args:
      opts: Options parsed from command line.
    """
    if IsWindowsHost():
      if not opts.build_preference:
        opts.build_preference = 'msvs'

      if opts.build_preference == 'msvs':
        if not os.getenv('VS100COMNTOOLS'):
          raise RuntimeError(
              'Path to visual studio could not be determined.')
      else:
        SetBuildSystemDefault(opts.build_preference, opts.use_goma,
                              opts.goma_dir)
    else:
      if not opts.build_preference:
        if 'ninja' in os.getenv('GYP_GENERATORS'):
          opts.build_preference = 'ninja'
        else:
          opts.build_preference = 'make'

      SetBuildSystemDefault(opts.build_preference, opts.use_goma, opts.goma_dir)

    if not bisect_utils.SetupPlatformBuildEnvironment(opts):
      raise RuntimeError('Failed to set platform environment.')

  @staticmethod
  def FromOpts(opts):
    """Constructs and returns the Builder subclass for the target platform.

    Args:
      opts: Options parsed from command line.
    """
    builder = None
    if opts.target_platform == 'cros':
      builder = CrosBuilder(opts)
    elif opts.target_platform == 'android':
      builder = AndroidBuilder(opts)
    elif opts.target_platform == 'android-chrome':
      builder = AndroidChromeBuilder(opts)
    else:
      builder = DesktopBuilder(opts)
    return builder

  def Build(self, depot, opts):
    # Subclasses implement the actual build step.
    raise NotImplementedError()

  def GetBuildOutputDirectory(self, opts, src_dir=None):
    """Returns the path to the build directory, relative to the checkout root.

    Assumes that the current working directory is the checkout root.
    """
    src_dir = src_dir or 'src'
    if opts.build_preference == 'ninja' or IsLinuxHost():
      return os.path.join(src_dir, 'out')
    if IsMacHost():
      return os.path.join(src_dir, 'xcodebuild')
    if IsWindowsHost():
      return os.path.join(src_dir, 'build')
    raise NotImplementedError('Unexpected platform %s' % sys.platform)
class DesktopBuilder(Builder):
  """DesktopBuilder is used to build Chromium on linux/mac/windows."""
  def __init__(self, opts):
    super(DesktopBuilder, self).__init__(opts)

  def Build(self, depot, opts):
    """Builds chromium_builder_perf target using options passed into
    the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    targets = ['chromium_builder_perf']

    threads = None
    if opts.use_goma:
      threads = 64

    build_success = False
    if opts.build_preference == 'make':
      build_success = BuildWithMake(threads, targets, opts.target_build_type)
    elif opts.build_preference == 'ninja':
      build_success = BuildWithNinja(threads, targets, opts.target_build_type)
    elif opts.build_preference == 'msvs':
      assert IsWindowsHost(), 'msvs is only supported on Windows.'
      build_success = BuildWithVisualStudio(targets, opts.target_build_type)
    else:
      assert False, 'No build system defined.'
    return build_success
class AndroidBuilder(Builder):
  """AndroidBuilder is used to build on android."""
  def __init__(self, opts):
    super(AndroidBuilder, self).__init__(opts)

  def _GetTargets(self):
    # Targets needed to run android performance tests.
    return ['chrome_shell_apk', 'cc_perftests_apk', 'android_tools']

  def Build(self, depot, opts):
    """Builds the android content shell and other necessary tools using options
    passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    threads = None
    if opts.use_goma:
      threads = 64

    build_success = False
    if opts.build_preference == 'ninja':
      build_success = BuildWithNinja(
          threads, self._GetTargets(), opts.target_build_type)
    else:
      assert False, 'No build system defined.'

    return build_success
class AndroidChromeBuilder(AndroidBuilder):
  """AndroidChromeBuilder is used to build on android's chrome."""
  def __init__(self, opts):
    super(AndroidChromeBuilder, self).__init__(opts)

  def _GetTargets(self):
    # Everything the base android builder produces, plus the chrome apk.
    return AndroidBuilder._GetTargets(self) + ['chrome_apk']
class CrosBuilder(Builder):
  """CrosBuilder is used to build and image ChromeOS/Chromium when cros is the
  target platform."""
  def __init__(self, opts):
    super(CrosBuilder, self).__init__(opts)

  def ImageToTarget(self, opts):
    """Installs latest image to target specified by opts.cros_remote_ip.

    Args:
      opts: Program options containing cros_board and cros_remote_ip.

    Returns:
      True if successful.
    """
    try:
      # Keys will most likely be set to 0640 after wiping the chroot.
      # 0o600 is the modern octal literal (valid on Python 2.6+ and 3).
      os.chmod(CROS_SCRIPT_KEY_PATH, 0o600)
      os.chmod(CROS_TEST_KEY_PATH, 0o600)
      cmd = [CROS_SDK_PATH, '--', './bin/cros_image_to_target.py',
             '--remote=%s' % opts.cros_remote_ip,
             '--board=%s' % opts.cros_board, '--test', '--verbose']

      return_code = RunProcess(cmd)
      return not return_code
    except OSError:
      # Missing keys/tools mean the image could not be deployed.
      return False

  def BuildPackages(self, opts, depot):
    """Builds packages for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    if depot != 'cros':
      # Point the chroot at the local chrome checkout being bisected.
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=%s' % opts.target_build_type, './build_packages',
        '--board=%s' % opts.cros_board]
    return_code = RunProcess(cmd)

    return not return_code

  def BuildImage(self, opts, depot):
    """Builds test image for cros.

    Args:
      opts: Program options containing cros_board.
      depot: The depot being bisected.

    Returns:
      True if successful.
    """
    cmd = [CROS_SDK_PATH]

    if depot != 'cros':
      path_to_chrome = os.path.join(os.getcwd(), '..')
      cmd += ['--chrome_root=%s' % path_to_chrome]

    cmd += ['--']

    if depot != 'cros':
      cmd += ['CHROME_ORIGIN=LOCAL_SOURCE']

    cmd += ['BUILDTYPE=%s' % opts.target_build_type, '--', './build_image',
        '--board=%s' % opts.cros_board, 'test']

    return_code = RunProcess(cmd)

    return not return_code

  def Build(self, depot, opts):
    """Builds targets using options passed into the script.

    Args:
      depot: Current depot being bisected.
      opts: The options parsed from the command line.

    Returns:
      True if build was successful.
    """
    # Each stage must succeed before the next is attempted.
    if self.BuildPackages(opts, depot):
      if self.BuildImage(opts, depot):
        return self.ImageToTarget(opts)
    return False
class SourceControl(object):
  """SourceControl is an abstraction over the underlying source control
  system used for chromium. For now only git is supported, but in the
  future, the svn workflow could be added as well."""
  def __init__(self):
    super(SourceControl, self).__init__()

  def SyncToRevisionWithGClient(self, revision):
    """Uses gclient to sync to the specified revision.

    ie. gclient sync --revision <revision>

    Args:
      revision: The git SHA1 or svn CL (depending on workflow).

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunGClient(['sync', '--verbose', '--reset', '--force',
        '--delete_unversioned_trees', '--nohooks', '--revision', revision])

  def SyncToRevisionWithRepo(self, timestamp):
    """Uses repo to sync all the underlying git depots to the specified
    time.

    Args:
      timestamp: The unix timestamp to sync to.

    Returns:
      The return code of the call.
    """
    return bisect_utils.RunRepoSyncAtTimestamp(timestamp)
class GitSourceControl(SourceControl):
  """GitSourceControl is used to query the underlying source control. """
  def __init__(self, opts):
    super(GitSourceControl, self).__init__()
    # Keep the parsed command-line options for later queries.
    self.opts = opts
, revision_range_end
, revision_range_start
, cwd
=None):
1094 """Retrieves a list of revisions between |revision_range_start| and
1095 |revision_range_end|.
1098 revision_range_end: The SHA1 for the end of the range.
1099 revision_range_start: The SHA1 for the beginning of the range.
1102 A list of the revisions between |revision_range_start| and
1103 |revision_range_end| (inclusive).
1105 revision_range
= '%s..%s' % (revision_range_start
, revision_range_end
)
1106 cmd
= ['log', '--format=%H', '-10000', '--first-parent', revision_range
]
1107 log_output
= CheckRunGit(cmd
, cwd
=cwd
)
1109 revision_hash_list
= log_output
.split()
1110 revision_hash_list
.append(revision_range_start
)
1112 return revision_hash_list
1114 def SyncToRevision(self
, revision
, sync_client
=None):
1115 """Syncs to the specified revision.
1118 revision: The revision to sync to.
1119 use_gclient: Specifies whether or not we should sync using gclient or
1120 just use source control directly.
1127 results
= RunGit(['checkout', revision
])[1]
1128 elif sync_client
== 'gclient':
1129 results
= self
.SyncToRevisionWithGClient(revision
)
1130 elif sync_client
== 'repo':
1131 results
= self
.SyncToRevisionWithRepo(revision
)
  def ResolveToRevision(self, revision_to_check, depot, search, cwd=None):
    """If an SVN revision is supplied, try to resolve it to a git SHA1.

    Args:
      revision_to_check: The user supplied revision string that may need to be
          resolved to a git SHA1.
      depot: The depot the revision_to_check is from.
      search: The number of changelists to try if the first fails to resolve
          to a git hash. If the value is negative, the function will search
          backwards chronologically, otherwise it will search forward.
      cwd: Working directory to run git commands from.

    Returns:
      A string containing a git SHA1 hash, otherwise None.
    """
    # NOTE(review): several control-flow lines (the cros/non-cros split, the
    # search-direction if/else, loop exit and returns) were garbled in the
    # source and are reconstructed -- confirm against VCS history.
    # Android-chrome is git only, so no need to resolve this to anything else.
    if depot == 'android-chrome':
      return revision_to_check

    if depot != 'cros':
      # A non-integer string is assumed to already be a git hash.
      if not IsStringInt(revision_to_check):
        return revision_to_check

      depot_svn = 'svn://svn.chromium.org/chrome/trunk/src'

      if depot != 'chromium':
        depot_svn = DEPOT_DEPS_NAME[depot]['svn']

      svn_revision = int(revision_to_check)
      git_revision = None

      # Search forward or backward from the requested SVN revision, looking
      # for a commit whose git-svn-id annotation matches.
      if search > 0:
        search_range = xrange(svn_revision, svn_revision + search, 1)
      else:
        search_range = xrange(svn_revision, svn_revision + search, -1)

      for i in search_range:
        svn_pattern = 'git-svn-id: %s@%d' % (depot_svn, i)
        # NOTE(review): the tail of this command (branch ref argument) was
        # garbled; 'origin/master' is the presumed original -- confirm.
        cmd = ['log', '--format=%H', '-1', '--grep', svn_pattern,
               'origin/master']

        (log_output, return_code) = RunGit(cmd, cwd=cwd)

        assert not return_code, 'An error occurred while running'\
                                ' "git %s"' % ' '.join(cmd)

        if not return_code:
          log_output = log_output.strip()

          if log_output:
            git_revision = log_output
            break

      return git_revision
    else:
      # CrOS branch: integer versions are used verbatim.
      if IsStringInt(revision_to_check):
        return int(revision_to_check)
      else:
        cwd = os.getcwd()
        os.chdir(os.path.join(os.getcwd(), 'src', 'third_party',
            'chromiumos-overlay'))
        pattern = CROS_VERSION_PATTERN % revision_to_check
        cmd = ['log', '--format=%ct', '-1', '--grep', pattern]

        git_revision = None

        log_output = CheckRunGit(cmd, cwd=cwd)
        if log_output:
          git_revision = log_output
          # Resolved "revision" for cros is actually a commit timestamp.
          git_revision = int(log_output.strip())
        os.chdir(cwd)

        return git_revision
1209 def IsInProperBranch(self
):
1210 """Confirms they're in the master branch for performing the bisection.
1211 This is needed or gclient will fail to sync properly.
1214 True if the current branch on src is 'master'
1216 cmd
= ['rev-parse', '--abbrev-ref', 'HEAD']
1217 log_output
= CheckRunGit(cmd
)
1218 log_output
= log_output
.strip()
1220 return log_output
== "master"
1222 def SVNFindRev(self
, revision
, cwd
=None):
1223 """Maps directly to the 'git svn find-rev' command.
1226 revision: The git SHA1 to use.
1229 An integer changelist #, otherwise None.
1232 cmd
= ['svn', 'find-rev', revision
]
1234 output
= CheckRunGit(cmd
, cwd
)
1235 svn_revision
= output
.strip()
1237 if IsStringInt(svn_revision
):
1238 return int(svn_revision
)
  def QueryRevisionInfo(self, revision, cwd=None):
    """Gathers information on a particular revision, such as author's name,
    email, subject, and date.

    Args:
      revision: Revision you want to gather information on.
      cwd: Working directory to run git from.

    Returns:
      A dict with the keys 'author', 'email', 'subject', 'date' and 'body'.
    """
    # NOTE(review): the dict initializer and final return were garbled in the
    # source and are reconstructed -- confirm against VCS history.
    commit_info = {}

    # One 'git log -1' invocation per field; each pretty-format directive
    # extracts a single piece of commit metadata.
    formats = ['%cN', '%cE', '%s', '%cD', '%b']
    targets = ['author', 'email', 'subject', 'date', 'body']

    for i in xrange(len(formats)):
      cmd = ['log', '--format=%s' % formats[i], '-1', revision]
      output = CheckRunGit(cmd, cwd=cwd)
      commit_info[targets[i]] = output.rstrip()

    return commit_info
1270 def CheckoutFileAtRevision(self
, file_name
, revision
, cwd
=None):
1271 """Performs a checkout on a file at the given revision.
1276 return not RunGit(['checkout', revision
, file_name
], cwd
=cwd
)[1]
1278 def RevertFileToHead(self
, file_name
):
1279 """Unstages a file and returns it to HEAD.
1284 # Reset doesn't seem to return 0 on success.
1285 RunGit(['reset', 'HEAD', file_name
])
1287 return not RunGit(['checkout', bisect_utils
.FILE_DEPS_GIT
])[1]
  def QueryFileRevisionHistory(self, filename, revision_start, revision_end):
    """Returns a list of commits that modified this file.

    Args:
      filename: Name of file.
      revision_start: Start of revision range.
      revision_end: End of revision range.

    Returns:
      Returns a list of commits that touched this file.
    """
    # NOTE(review): the tail of this command was garbled in the source;
    # presumably the filename argument -- confirm against VCS history.
    cmd = ['log', '--format=%H', '%s~1..%s' % (revision_start, revision_end),
           filename]
    output = CheckRunGit(cmd)

    # Drop empty lines from the hash-per-line output.
    return [o for o in output.split('\n') if o]
1307 class BisectPerformanceMetrics(object):
1308 """This class contains functionality to perform a bisection of a range of
1309 revisions to narrow down where performance regressions may have occurred.
1311 The main entry-point is the Run method.
  def __init__(self, source_control, opts):
    """Sets up bisect state from parsed options and a SourceControl helper.

    Args:
      source_control: A source-control wrapper (e.g. GitSourceControl).
      opts: Parsed BisectOptions for this run.
    """
    super(BisectPerformanceMetrics, self).__init__()

    # NOTE(review): several attribute initializers (opts, depot_cwd,
    # warnings) were garbled in the source and are reconstructed from their
    # uses elsewhere in this class -- confirm against VCS history.
    self.opts = opts
    self.source_control = source_control
    self.src_cwd = os.getcwd()
    self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
    self.depot_cwd = {}
    self.cleanup_commands = []
    self.warnings = []
    self.builder = Builder.FromOpts(opts)

    # This always starts true since the script grabs latest first.
    self.was_blink = True

    for d in DEPOT_NAMES:
      # The working directory of each depot is just the path to the depot, but
      # since we're already in 'src', we can skip that part.
      self.depot_cwd[d] = os.path.join(
          self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
  def PerformCleanup(self):
    """Performs cleanup when script is finished."""
    os.chdir(self.src_cwd)
    for c in self.cleanup_commands:
      # NOTE(review): the dispatch on the command tag was garbled in the
      # source; reconstructed as a 'mv' (move) command check, matching the
      # shutil.move payload -- confirm against VCS history.
      if c[0] == 'mv':
        # c is ('mv', source, destination).
        shutil.move(c[1], c[2])
      else:
        assert False, 'Invalid cleanup command.'
  def GetRevisionList(self, depot, bad_revision, good_revision):
    """Retrieves a list of all the commits between the bad revision and
    last known good revision."""

    # NOTE(review): the cros/default branching and the os.chdir save/restore
    # were garbled in the source and are reconstructed -- confirm vs. VCS.
    revision_work_list = []

    if depot == 'cros':
      revision_range_start = good_revision
      revision_range_end = bad_revision

      cwd = os.getcwd()
      self.ChangeToDepotWorkingDirectory('cros')

      # Print the commit timestamps for every commit in the revision time
      # range. We'll sort them and bisect by that. There is a remote chance
      # that 2 (or more) commits will share the exact same timestamp, but it's
      # probably safe to ignore that case.
      cmd = ['repo', 'forall', '-c',
          'git log --format=%%ct --before=%d --after=%d' % (
          revision_range_end, revision_range_start)]
      (output, return_code) = RunProcessAndRetrieveOutput(cmd)

      assert not return_code, 'An error occurred while running'\
                              ' "%s"' % ' '.join(cmd)

      os.chdir(cwd)

      # Deduplicate timestamps and bisect over them newest-first.
      revision_work_list = list(set(
          [int(o) for o in output.split('\n') if IsStringInt(o)]))
      revision_work_list = sorted(revision_work_list, reverse=True)
    else:
      cwd = self._GetDepotDirectory(depot)
      revision_work_list = self.source_control.GetRevisionList(bad_revision,
          good_revision, cwd=cwd)

    return revision_work_list
  def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
    """Maps a V8 trunk revision to its bleeding_edge counterpart, if possible.

    Args:
      revision: A git hash on the V8 trunk depot.

    Returns:
      A git hash on v8_bleeding_edge, otherwise None.
    """
    # NOTE(review): the try/except and several branch/return lines in this
    # method were garbled in the source and are reconstructed -- confirm
    # against VCS history.
    svn_revision = self.source_control.SVNFindRev(revision)

    if IsStringInt(svn_revision):
      # V8 is tricky to bisect, in that there are only a few instances when
      # we can dive into bleeding_edge and get back a meaningful result.
      # Try to detect a V8 "business as usual" case, which is when:
      # 1. trunk revision N has description "Version X.Y.Z"
      # 2. bleeding_edge revision (N-1) has description "Prepare push to
      #    trunk. Now working on X.Y.(Z+1)."
      #
      # As of 01/24/2014, V8 trunk descriptions are formatted:
      # "Version 3.X.Y (based on bleeding_edge revision rZ)"
      # So we can just try parsing that out first and fall back to the old way.
      v8_dir = self._GetDepotDirectory('v8')
      v8_bleeding_edge_dir = self._GetDepotDirectory('v8_bleeding_edge')

      revision_info = self.source_control.QueryRevisionInfo(revision,
          cwd=v8_dir)

      version_re = re.compile("Version (?P<values>[0-9,.]+)")

      regex_results = version_re.search(revision_info['subject'])

      if regex_results:
        git_revision = None

        # Look for "based on bleeding_edge" and parse out revision
        if 'based on bleeding_edge' in revision_info['subject']:
          try:
            bleeding_edge_revision = revision_info['subject'].split(
                'bleeding_edge revision r')[1]
            bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
            git_revision = self.source_control.ResolveToRevision(
                bleeding_edge_revision, 'v8_bleeding_edge', 1,
                cwd=v8_bleeding_edge_dir)
            return git_revision
          except (IndexError, ValueError):
            pass

        if not git_revision:
          # Wasn't successful, try the old way of looking for "Prepare push to"
          git_revision = self.source_control.ResolveToRevision(
              int(svn_revision) - 1, 'v8_bleeding_edge', -1,
              cwd=v8_bleeding_edge_dir)

          if git_revision:
            revision_info = self.source_control.QueryRevisionInfo(git_revision,
                cwd=v8_bleeding_edge_dir)

            if 'Prepare push to trunk' in revision_info['subject']:
              return git_revision
    return None
  def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
    """Finds the nearest trunk revision (before/after the given one) that can
    be mapped to a bleeding_edge revision.

    Args:
      revision: A git hash on the V8 trunk depot.
      search_forward: Search chronologically forward when True, else backward.

    Returns:
      A git hash on v8_bleeding_edge, otherwise None.
    """
    # NOTE(review): the if/else on search direction, the branch-ref argument
    # and the loop body were garbled in the source and are reconstructed --
    # confirm against VCS history.
    cwd = self._GetDepotDirectory('v8')
    cmd = ['log', '--format=%ct', '-1', revision]
    output = CheckRunGit(cmd, cwd=cwd)
    commit_time = int(output)
    commits = []

    if search_forward:
      cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
          'origin/master']
      output = CheckRunGit(cmd, cwd=cwd)
      output = output.split()
      commits = output
      # git log lists newest first; reverse to walk forward in time.
      commits = reversed(commits)
    else:
      cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
          'origin/master']
      output = CheckRunGit(cmd, cwd=cwd)
      output = output.split()
      commits = output

    bleeding_edge_revision = None

    for c in commits:
      bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)
      if bleeding_edge_revision:
        break

    return bleeding_edge_revision
1466 def _ParseRevisionsFromDEPSFileManually(self
, deps_file_contents
):
1467 """Manually parses the vars section of the DEPS file to determine
1468 chromium/blink/etc... revisions.
1471 A dict in the format {depot:revision} if successful, otherwise None.
1473 # We'll parse the "vars" section of the DEPS file.
1474 rxp
= re
.compile('vars = {(?P<vars_body>[^}]+)', re
.MULTILINE
)
1475 re_results
= rxp
.search(deps_file_contents
)
1481 # We should be left with a series of entries in the vars component of
1482 # the DEPS file with the following format:
1483 # 'depot_name': 'revision',
1484 vars_body
= re_results
.group('vars_body')
1485 rxp
= re
.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
1487 re_results
= rxp
.findall(vars_body
)
1489 return dict(re_results
)
  def _ParseRevisionsFromDEPSFile(self, depot):
    """Parses the local DEPS file to determine blink/skia/v8 revisions which
    may be needed if the bisect recurses into those depots later.

    Args:
      depot: Depot being bisected.

    Returns:
      A dict in the format {depot:revision} if successful, otherwise None.
    """
    # NOTE(review): the try/except structure, several guards and returns were
    # garbled in the source and are reconstructed -- confirm vs. VCS history.
    try:
      # Evaluate the DEPS file in a sandboxed dict; Var() resolves entries of
      # the DEPS "vars" dict, From() is stubbed out.
      deps_data = {'Var': lambda _: deps_data["vars"][_],
                   'From': lambda *args: None}
      execfile(bisect_utils.FILE_DEPS_GIT, {}, deps_data)
      deps_data = deps_data['deps']

      # Extract the pinned git hash out of "<repo>.git@<hash>" dep URLs.
      rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
      results = {}
      for depot_name, depot_data in DEPOT_DEPS_NAME.iteritems():
        if (depot_data.get('platform') and
            depot_data.get('platform') != os.name):
          continue

        if (depot_data.get('recurse') and depot in depot_data.get('from')):
          depot_data_src = depot_data.get('src') or depot_data.get('src_old')
          src_dir = deps_data.get(depot_data_src)
          if src_dir:
            self.depot_cwd[depot_name] = os.path.join(self.src_cwd,
                depot_data_src[4:])
            re_results = rxp.search(src_dir)
            if re_results:
              results[depot_name] = re_results.group('revision')
            else:
              warning_text = ('Couldn\'t parse revision for %s while bisecting '
                              '%s' % (depot_name, depot))
              if not warning_text in self.warnings:
                self.warnings.append(warning_text)
          else:
            results[depot_name] = None
      return results
    except ImportError:
      # Fallback: the DEPS file couldn't be exec'd; parse its text directly.
      deps_file_contents = ReadStringFromFile(bisect_utils.FILE_DEPS_GIT)
      parse_results = self._ParseRevisionsFromDEPSFileManually(
          deps_file_contents)
      results = {}
      for depot_name, depot_revision in parse_results.iteritems():
        depot_revision = depot_revision.strip('@')
        print depot_name, depot_revision
        for current_name, current_data in DEPOT_DEPS_NAME.iteritems():
          if (current_data.has_key('deps_var') and
              current_data['deps_var'] == depot_name):
            src_name = current_name
            results[src_name] = depot_revision
            break
      return results
  def Get3rdPartyRevisionsFromCurrentRevision(self, depot, revision):
    """Parses the DEPS file to determine WebKit/v8/etc... versions.

    Args:
      depot: Depot whose checkout we are currently synced to.
      revision: Revision currently synced to (unused in the visible paths).

    Returns:
      A dict in the format {depot:revision} if successful, otherwise None.
    """
    # NOTE(review): the branch structure and cwd save/restore were garbled in
    # the source and are reconstructed -- confirm against VCS history.
    cwd = os.getcwd()
    self.ChangeToDepotWorkingDirectory(depot)

    results = {}

    if depot == 'chromium' or depot == 'android-chrome':
      results = self._ParseRevisionsFromDEPSFile(depot)
      os.chdir(cwd)
    elif depot == 'cros':
      cmd = [CROS_SDK_PATH, '--', 'portageq-%s' % self.opts.cros_board,
             'best_visible', '/build/%s' % self.opts.cros_board, 'ebuild',
             CROS_CHROMEOS_PATTERN]
      (output, return_code) = RunProcessAndRetrieveOutput(cmd)

      assert not return_code, 'An error occurred while running' \
                              ' "%s"' % ' '.join(cmd)

      # NOTE(review): this compares an int to a string; in Python 2 that is
      # always False, so the prefix is never stripped. Likely intended
      # len(CROS_CHROMEOS_PATTERN) -- flagging, not changing here.
      if len(output) > CROS_CHROMEOS_PATTERN:
        output = output[len(CROS_CHROMEOS_PATTERN):]

      if len(output) > 1:
        output = output.split('_')[0]

        if len(output) > 3:
          contents = output.split('.')

          version = contents[2]

          # Chrome version fields are MAJOR.MINOR.BUILD.PATCH; bisect only on
          # the .0 patch level and warn when the real patch level differs.
          if contents[3] != '0':
            warningText = 'Chrome version: %s.%s but using %s.0 to bisect.' % \
                (version, contents[3], version)
            if not warningText in self.warnings:
              self.warnings.append(warningText)

          cwd = os.getcwd()
          self.ChangeToDepotWorkingDirectory('chromium')
          return_code = CheckRunGit(['log', '-1', '--format=%H',
              '--author=chrome-release@google.com', '--grep=to %s' % version,
              'origin/master'])
          os.chdir(cwd)

          results['chromium'] = output.strip()
    elif depot == 'v8':
      # We can't try to map the trunk revision to bleeding edge yet, because
      # we don't know which direction to try to search in. Have to wait until
      # the bisect has narrowed the results down to 2 v8 rolls.
      results['v8_bleeding_edge'] = None

    return results
  def BackupOrRestoreOutputdirectory(self, restore=False, build_type='Release'):
    """Backs up or restores build output directory based on restore argument.

    Args:
      restore: Indicates whether to restore or backup. Default is False(Backup)
      build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)

    Returns:
      Path to backup or restored location as string. otherwise None if it
      fails.
    """
    build_dir = os.path.abspath(
        self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
    source_dir = os.path.join(build_dir, build_type)
    destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
    # NOTE(review): the 'if restore:' guard and the final return were garbled
    # in the source and are reconstructed -- confirm against VCS history.
    if restore:
      # Restoring moves the .bak directory back into place, i.e. the swap of
      # source and destination below.
      source_dir, destination_dir = destination_dir, source_dir
    if os.path.exists(source_dir):
      RmTreeAndMkDir(destination_dir, skip_makedir=True)
      shutil.move(source_dir, destination_dir)
      return destination_dir
    return None
  def GetBuildArchiveForRevision(self, revision, gs_bucket, target_arch,
                                 patch_sha, out_dir):
    """Checks and downloads build archive for a given revision.

    Checks for build archive with Git hash or SVN revision. If either of the
    file exists, then downloads the archive file.

    Args:
      revision: A Git hash revision.
      gs_bucket: Cloud storage bucket name
      target_arch: 32 or 64 bit build target
      patch_sha: SHA of a DEPS patch (used while bisecting 3rd party
          repositories).
      out_dir: Build output directory where downloaded file is stored.

    Returns:
      Downloaded archive file path if exists, otherwise None.
    """
    # Source archive file path on cloud storage using Git revision.
    source_file = GetRemoteBuildPath(
        revision, self.opts.target_platform, target_arch, patch_sha)
    downloaded_archive = FetchFromCloudStorage(gs_bucket, source_file, out_dir)
    if not downloaded_archive:
      # Get SVN revision for the given SHA.
      svn_revision = self.source_control.SVNFindRev(revision)
      # NOTE(review): this guard was garbled in the source and is
      # reconstructed -- confirm against VCS history.
      if svn_revision:
        # Source archive file path on cloud storage using SVN revision.
        source_file = GetRemoteBuildPath(
            svn_revision, self.opts.target_platform, target_arch, patch_sha)
        return FetchFromCloudStorage(gs_bucket, source_file, out_dir)
    return downloaded_archive
  def DownloadCurrentBuild(self, revision, build_type='Release', patch=None):
    """Downloads the build archive for the given revision.

    Args:
      revision: The Git revision to download or build.
      build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
      patch: A DEPS patch (used while bisecting 3rd party repositories).

    Returns:
      True if download succeeds, otherwise False.
    """
    # NOTE(review): the guards around patch handling, the try/finally
    # scaffolding and the returns were garbled in the source and are
    # reconstructed -- confirm against VCS history.
    patch_sha = None
    if patch:
      # Get the SHA of the DEPS changes patch.
      patch_sha = GetSHA1HexDigest(patch)

      # Update the DEPS changes patch with a patch to create a new file named
      # 'DEPS.sha' and add patch_sha evaluated above to it.
      patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})

    # Get Build output directory
    abs_build_dir = os.path.abspath(
        self.builder.GetBuildOutputDirectory(self.opts, self.src_cwd))

    fetch_build_func = lambda: self.GetBuildArchiveForRevision(
        revision, self.opts.gs_bucket, self.opts.target_arch,
        patch_sha, abs_build_dir)

    # Downloaded archive file path, downloads build archive for given revision.
    downloaded_file = fetch_build_func()

    # When build archive doesn't exists, post a build request to tryserver
    # and wait for the build to be produced.
    if not downloaded_file:
      downloaded_file = self.PostBuildRequestAndWait(
          revision, fetch_build=fetch_build_func, patch=patch)
      if not downloaded_file:
        return False

    # Generic name for the archive, created when archive file is extracted.
    output_dir = os.path.join(
        abs_build_dir, GetZipFileName(target_arch=self.opts.target_arch))
    # Unzip build archive directory.
    try:
      RmTreeAndMkDir(output_dir, skip_makedir=True)
      ExtractZip(downloaded_file, abs_build_dir)
      if os.path.exists(output_dir):
        self.BackupOrRestoreOutputdirectory(restore=False)
        # Build output directory based on target(e.g. out/Release, out/Debug).
        target_build_output_dir = os.path.join(abs_build_dir, build_type)
        print 'Moving build from %s to %s' % (
            output_dir, target_build_output_dir)
        shutil.move(output_dir, target_build_output_dir)
        return True
      raise IOError('Missing extracted folder %s ' % output_dir)
    except Exception as e:
      # NOTE(review): typo in this message ('Somewthing') -- present in the
      # original; a doc-only change cannot alter runtime strings.
      print 'Somewthing went wrong while extracting archive file: %s' % e
      self.BackupOrRestoreOutputdirectory(restore=True)
      # Cleanup any leftovers from unzipping.
      if os.path.exists(output_dir):
        RmTreeAndMkDir(output_dir, skip_makedir=True)
    finally:
      # Delete downloaded archive
      if os.path.exists(downloaded_file):
        os.remove(downloaded_file)
    return False
  def WaitUntilBuildIsReady(self, fetch_build, bot_name, builder_host,
                            builder_port, build_request_id, max_timeout):
    """Waits until build is produced by bisect builder on tryserver.

    Args:
      fetch_build: Function to check and download build from cloud storage.
      bot_name: Builder bot name on tryserver.
      builder_host: Tryserver hostname.
      builder_port: Tryserver port.
      build_request_id: A unique ID of the build request posted to tryserver.
      max_timeout: Maximum time to wait for the build.

    Returns:
      A (downloaded archive file path or None, status message) tuple.
    """
    # NOTE(review): the polling-loop scaffolding (while loop, poll interval,
    # build_num init, fetch call, final flush) was garbled in the source and
    # is reconstructed -- confirm against VCS history.
    # Build number on the tryserver.
    build_num = None
    # Interval to check build on cloud storage.
    poll_interval = 60
    # Interval to check build status on tryserver.
    status_check_interval = 600
    last_status_check = time.time()
    start_time = time.time()
    while True:
      # Checks for build on gs://chrome-perf and download if exists.
      res = fetch_build()
      if res:
        return (res, 'Build successfully found')
      elapsed_status_check = time.time() - last_status_check
      # To avoid overloading tryserver with status check requests, we check
      # build status for every 10 mins.
      if elapsed_status_check > status_check_interval:
        last_status_check = time.time()
        if not build_num:
          # Get the build number on tryserver for the current build.
          build_num = bisect_builder.GetBuildNumFromBuilder(
              build_request_id, bot_name, builder_host, builder_port)
        # Check the status of build using the build number.
        # Note: Build is treated as PENDING if build number is not found
        # on the the tryserver.
        build_status, status_link = bisect_builder.GetBuildStatus(
            build_num, bot_name, builder_host, builder_port)
        if build_status == bisect_builder.FAILED:
          return (None, 'Failed to produce build, log: %s' % status_link)
      elapsed_time = time.time() - start_time
      if elapsed_time > max_timeout:
        return (None, 'Timed out: %ss without build' % max_timeout)

      print 'Time elapsed: %ss without build.' % elapsed_time
      time.sleep(poll_interval)
      # For some reason, mac bisect bots were not flushing stdout periodically.
      # As a result buildbot command is timed-out. Flush stdout on all
      # platforms while waiting for build.
      sys.stdout.flush()
  def PostBuildRequestAndWait(self, revision, fetch_build, patch=None):
    """POSTs the build request job to the tryserver instance.

    A try job build request is posted to tryserver.chromium.perf master,
    and waits for the binaries to be produced and archived on cloud storage.
    Once the build is ready and stored onto cloud, build archive is downloaded
    into the output folder.

    Args:
      revision: A Git hash revision.
      fetch_build: Function to check and download build from cloud storage.
      patch: A DEPS patch (used while bisecting 3rd party repositories).

    Returns:
      Downloaded archive file path when requested build exists and download is
      successful, otherwise None.
    """
    # NOTE(review): platform guards inside the nested helper, the job 'bot'
    # argument and the final returns were garbled in the source and are
    # reconstructed -- confirm against VCS history.
    # Get SVN revision for the given SHA.
    svn_revision = self.source_control.SVNFindRev(revision)
    if not svn_revision:
      raise RuntimeError(
          'Failed to determine SVN revision for %s' % revision)

    def GetBuilderNameAndBuildTime(target_platform, target_arch='ia32'):
      """Gets builder bot name and buildtime in seconds based on platform."""
      # Bot names should match the one listed in tryserver.chromium's
      # master.cfg which produces builds for bisect.
      if IsWindows():
        # NOTE(review): both Windows branches return the same builder, making
        # the x64 check redundant as written -- confirm intended.
        if Is64BitWindows() and target_arch == 'x64':
          return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
        return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
      if IsLinux():
        if target_platform == 'android':
          return ('android_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
        return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
      if IsMac():
        return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME)
      raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)

    bot_name, build_timeout = GetBuilderNameAndBuildTime(
        self.opts.target_platform, self.opts.target_arch)
    builder_host = self.opts.builder_host
    builder_port = self.opts.builder_port
    # Create a unique ID for each build request posted to tryserver builders.
    # This ID is added to "Reason" property in build's json.
    build_request_id = GetSHA1HexDigest(
        '%s-%s-%s' % (svn_revision, patch, time.time()))

    # Creates a try job description.
    job_args = {'host': builder_host,
                'port': builder_port,
                'revision': 'src@%s' % svn_revision,
                'bot': bot_name,
                'name': build_request_id}
    # Update patch information if supplied.
    if patch:
      job_args['patch'] = patch
    # Posts job to build the revision on the server.
    if bisect_builder.PostTryJob(job_args):
      target_file, error_msg = self.WaitUntilBuildIsReady(fetch_build,
                                                          bot_name,
                                                          builder_host,
                                                          builder_port,
                                                          build_request_id,
                                                          build_timeout)
      if not target_file:
        print '%s [revision: %s]' % (error_msg, svn_revision)
        return None
      return target_file
    print 'Failed to post build request for revision: [%s]' % svn_revision
    return None
  def IsDownloadable(self, depot):
    """Checks if build is downloadable based on target platform and depot."""
    if (self.opts.target_platform in ['chromium', 'android'] and
        self.opts.gs_bucket):
      # Only chromium itself and depots rolled through chromium/v8 DEPS have
      # archived builds on cloud storage.
      return (depot == 'chromium' or
              'chromium' in DEPOT_DEPS_NAME[depot]['from'] or
              'v8' in DEPOT_DEPS_NAME[depot]['from'])
    # NOTE(review): the fall-through return was garbled in the source and is
    # reconstructed -- confirm against VCS history.
    return False
  def UpdateDeps(self, revision, depot, deps_file):
    """Updates DEPS file with new revision of dependency repository.

    This method search DEPS for a particular pattern in which depot revision
    is specified (e.g "webkit_revision": "123456"). If a match is found then
    it resolves the given git hash to SVN revision and replace it in DEPS file.

    Args:
      revision: A git hash revision of the dependency repository.
      depot: Current depot being bisected.
      deps_file: Path to DEPS file.

    Returns:
      True if DEPS file is modified successfully, otherwise False.
    """
    # NOTE(review): the guards, try/except and returns were garbled in the
    # source and are reconstructed -- confirm against VCS history.
    if not os.path.exists(deps_file):
      return False

    deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
    # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
    if not deps_var:
      # NOTE(review): '%s' with a trailing comma prints a tuple here; likely
      # meant %-formatting -- present in original, not changed in a doc edit.
      print 'DEPS update not supported for Depot: %s', depot
      return False

    # Hack to Angle repository because, in DEPS file "vars" dictionary variable
    # contains "angle_revision" key that holds git hash instead of SVN
    # revision. And sometime "angle_revision" key is not specified in "vars"
    # variable, in such cases check "deps" dictionary variable that matches
    # angle.git@[a-fA-F0-9]{40}$ and replace git hash.
    if depot == 'angle':
      return self.UpdateDEPSForAngle(revision, depot, deps_file)

    try:
      deps_contents = ReadStringFromFile(deps_file)
      # Check whether the depot and revision pattern in DEPS file vars
      # e.g. for webkit the format is "webkit_revision": "12345".
      deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_var,
                                 re.MULTILINE)
      match = re.search(deps_revision, deps_contents)
      if match:
        svn_revision = self.source_control.SVNFindRev(
            revision, self._GetDepotDirectory(depot))
        if not svn_revision:
          print 'Could not determine SVN revision for %s' % revision
          return False
        # Update the revision information for the given depot
        new_data = re.sub(deps_revision, str(svn_revision), deps_contents)

        # For v8_bleeding_edge revisions change V8 branch in order
        # to fetch bleeding edge revision.
        if depot == 'v8_bleeding_edge':
          new_data = self.UpdateV8Branch(new_data)
          if not new_data:
            return False
        # Write changes to DEPS file
        WriteStringToFile(new_data, deps_file)
        return True
    except IOError as e:
      print 'Something went wrong while updating DEPS file. [%s]' % e
    return False
1924 def UpdateV8Branch(self
, deps_content
):
1925 """Updates V8 branch in DEPS file to process v8_bleeding_edge.
1927 Check for "v8_branch" in DEPS file if exists update its value
1928 with v8_bleeding_edge branch. Note: "v8_branch" is added to DEPS
1929 variable from DEPS revision 254916, therefore check for "src/v8":
1930 <v8 source path> in DEPS in order to support prior DEPS revisions
1934 deps_content: DEPS file contents to be modified.
1937 Modified DEPS file contents as a string.
1939 new_branch
= r
'branches/bleeding_edge'
1940 v8_branch_pattern
= re
.compile(r
'(?<="v8_branch": ")(.*)(?=")')
1941 if re
.search(v8_branch_pattern
, deps_content
):
1942 deps_content
= re
.sub(v8_branch_pattern
, new_branch
, deps_content
)
1944 # Replaces the branch assigned to "src/v8" key in DEPS file.
1945 # Format of "src/v8" in DEPS:
1947 # (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
1948 # So, "/trunk@" is replace with "/branches/bleeding_edge@"
1949 v8_src_pattern
= re
.compile(
1950 r
'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re
.MULTILINE
)
1951 if re
.search(v8_src_pattern
, deps_content
):
1952 deps_content
= re
.sub(v8_src_pattern
, new_branch
, deps_content
)
1955 def UpdateDEPSForAngle(self
, revision
, depot
, deps_file
):
1956 """Updates DEPS file with new revision for Angle repository.
1958 This is a hack for Angle depot case because, in DEPS file "vars" dictionary
1959 variable contains "angle_revision" key that holds git hash instead of
1962 And sometimes "angle_revision" key is not specified in "vars" variable,
1963 in such cases check "deps" dictionary variable that matches
1964 angle.git@[a-fA-F0-9]{40}$ and replace git hash.
1966 deps_var
= DEPOT_DEPS_NAME
[depot
]['deps_var']
1968 deps_contents
= ReadStringFromFile(deps_file
)
1969 # Check whether the depot and revision pattern in DEPS file vars variable
1970 # e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
1971 angle_rev_pattern
= re
.compile(r
'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
1972 deps_var
, re
.MULTILINE
)
1973 match
= re
.search(angle_rev_pattern
% deps_var
, deps_contents
)
1975 # Update the revision information for the given depot
1976 new_data
= re
.sub(angle_rev_pattern
, revision
, deps_contents
)
1978 # Check whether the depot and revision pattern in DEPS file deps
1980 # "src/third_party/angle": Var("chromium_git") +
1981 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
1982 angle_rev_pattern
= re
.compile(
1983 r
'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re
.MULTILINE
)
1984 match
= re
.search(angle_rev_pattern
, deps_contents
)
1986 print 'Could not find angle revision information in DEPS file.'
1988 new_data
= re
.sub(angle_rev_pattern
, revision
, deps_contents
)
1989 # Write changes to DEPS file
1990 WriteStringToFile(new_data
, deps_file
)
1993 print 'Something went wrong while updating DEPS file, %s' % e
  def CreateDEPSPatch(self, depot, revision):
    """Modifies DEPS and returns diff as text.

    Args:
      depot: Current depot being bisected.
      revision: A git hash revision of the dependency repository.

    Returns:
      A tuple with git hash of chromium revision and DEPS patch text.
    """
    # NOTE(review): the nested success/failure branching and diff flags were
    # garbled in the source and are reconstructed -- confirm vs. VCS history.
    deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
    if not os.path.exists(deps_file_path):
      raise RuntimeError('DEPS file does not exists.[%s]' % deps_file_path)
    # Get current chromium revision (git hash).
    chromium_sha = CheckRunGit(['rev-parse', 'HEAD']).strip()
    if not chromium_sha:
      raise RuntimeError('Failed to determine Chromium revision for %s' %
                         revision)
    if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or
        'v8' in DEPOT_DEPS_NAME[depot]['from']):
      # Checkout DEPS file for the current chromium revision.
      if self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS,
                                                    chromium_sha,
                                                    cwd=self.src_cwd):
        if self.UpdateDeps(revision, depot, deps_file_path):
          diff_command = ['diff',
                          '--src-prefix=src/',
                          '--dst-prefix=src/',
                          '--no-ext-diff',
                          bisect_utils.FILE_DEPS]
          diff_text = CheckRunGit(diff_command, cwd=self.src_cwd)
          return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
        else:
          raise RuntimeError('Failed to update DEPS file for chromium: [%s]' %
                             chromium_sha)
      else:
        raise RuntimeError('DEPS checkout Failed for chromium revision : [%s]' %
                           chromium_sha)
    return (None, None)
  def BuildCurrentRevision(self, depot, revision=None):
    """Builds chrome and performance_ui_tests on the current revision.

    Args:
      depot: Current depot being bisected.
      revision: A git hash of the revision being built (enables archive
          download when the build is downloadable).

    Returns:
      True if the build was successful.
    """
    # NOTE(review): the cwd save/restore and download-path returns were
    # garbled in the source and are reconstructed -- confirm vs. VCS history.
    if self.opts.debug_ignore_build:
      return True
    cwd = os.getcwd()
    os.chdir(self.src_cwd)
    # Fetch build archive for the given revision from the cloud storage when
    # the storage bucket is passed.
    if self.IsDownloadable(depot) and revision:
      deps_patch = None
      if depot != 'chromium':
        # Create a DEPS patch with new revision for dependency repository.
        (revision, deps_patch) = self.CreateDEPSPatch(depot, revision)
      if self.DownloadCurrentBuild(revision, patch=deps_patch):
        os.chdir(cwd)
        if deps_patch:
          # Reverts the changes to DEPS file.
          self.source_control.CheckoutFileAtRevision(bisect_utils.FILE_DEPS,
                                                     revision,
                                                     cwd=self.src_cwd)
        return True
      return False

    # These codes are executed when bisect bots builds binaries locally.
    build_success = self.builder.Build(depot, self.opts)
    os.chdir(cwd)
    return build_success
  def RunGClientHooks(self):
    """Runs gclient with runhooks command.

    Returns:
      True if gclient reports no errors.
    """
    # NOTE(review): the debug-mode early return was garbled in the source and
    # is reconstructed -- confirm against VCS history.
    if self.opts.debug_ignore_build:
      return True

    # RunGClient returns a non-zero exit code on failure.
    return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
  def TryParseHistogramValuesFromOutput(self, metric, text):
    """Attempts to parse a metric in the format HISTOGRAM <graph>: <trace>.

    Args:
      metric: The metric as a list of [<trace>, <value>] strings.
      text: The text to parse the metric values from.

    Returns:
      A list of floating point numbers found.
    """
    metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])

    text_lines = text.split('\n')
    # NOTE(review): the accumulator init, the try/except around eval and the
    # final return were garbled in the source and are reconstructed --
    # confirm against VCS history.
    values_list = []

    for current_line in text_lines:
      if metric_formatted in current_line:
        # Strip the prefix; the remainder is a histogram dict literal.
        current_line = current_line[len(metric_formatted):]

        try:
          # NOTE(review): eval of test output -- acceptable only because the
          # input is locally-produced benchmark output, not untrusted data.
          histogram_values = eval(current_line)

          for b in histogram_values['buckets']:
            average_for_bucket = float(b['high'] + b['low']) * 0.5
            # Extends the list with N-elements with the average for that
            # bucket.
            values_list.extend([average_for_bucket] * b['count'])
        except Exception:
          pass

    return values_list
  def TryParseResultValuesFromOutput(self, metric, text):
    """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...

    Args:
      metric: The metric as a list of [<trace>, <value>] strings.
      text: The text to parse the metric values from.

    Returns:
      A list of floating point numbers found.
    """
    # Format is: RESULT <graph>: <trace>= <value> <units>
    metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))

    # The log will be parsed looking for format:
    # <*>RESULT <graph_name>: <trace_name>= <value>
    single_result_re = re.compile(
        metric_re + '\s*(?P<VALUE>[-]?\d*(\.\d*)?)')

    # The log will be parsed looking for format:
    # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
    multi_results_re = re.compile(
        metric_re + '\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')

    # The log will be parsed looking for format:
    # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
    mean_stddev_re = re.compile(
        metric_re +
        '\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')

    text_lines = text.split('\n')
    # NOTE(review): the accumulator init, the 'if values_list:' guard and the
    # final return were garbled in the source and are reconstructed --
    # confirm against VCS history.
    values_list = []
    for current_line in text_lines:
      # Parse the output from the performance test for the metric we're
      # interested in.
      single_result_match = single_result_re.search(current_line)
      multi_results_match = multi_results_re.search(current_line)
      mean_stddev_match = mean_stddev_re.search(current_line)
      if (not single_result_match is None and
          single_result_match.group('VALUE')):
        values_list += [single_result_match.group('VALUE')]
      elif (not multi_results_match is None and
            multi_results_match.group('VALUES')):
        metric_values = multi_results_match.group('VALUES')
        values_list += metric_values.split(',')
      elif (not mean_stddev_match is None and
            mean_stddev_match.group('MEAN')):
        values_list += [mean_stddev_match.group('MEAN')]

    # Discard anything that fails float conversion.
    values_list = [float(v) for v in values_list if IsStringFloat(v)]

    # If the metric is times/t, we need to sum the timings in order to get
    # similar regression results as the try-bots.
    metrics_to_sum = [['times', 't'], ['times', 'page_load_time'],
                      ['cold_times', 'page_load_time'],
                      ['warm_times', 'page_load_time']]

    if metric in metrics_to_sum:
      if values_list:
        values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]

    return values_list
def ParseMetricValuesFromOutput(self, metric, text):
  """Parses performance_ui_tests output and extracts values for a metric.

  Args:
    metric: The metric as a list of [<trace>, <value>] strings.
    text: The text to parse the metric values from.

  Returns:
    A list of floating point numbers found.
  """
  # Prefer RESULT-style lines; fall back to HISTOGRAM-style output when
  # nothing was found.
  parsed_values = self.TryParseResultValuesFromOutput(metric, text)
  if parsed_values:
    return parsed_values
  return self.TryParseHistogramValuesFromOutput(metric, text)
2190 def _GenerateProfileIfNecessary(self
, command_args
):
2191 """Checks the command line of the performance test for dependencies on
2192 profile generation, and runs tools/perf/generate_profile as necessary.
2195 command_args: Command line being passed to performance test, as a list.
2198 False if profile generation was necessary and failed, otherwise True.
2201 if '--profile-dir' in ' '.join(command_args
):
2202 # If we were using python 2.7+, we could just use the argparse
2203 # module's parse_known_args to grab --profile-dir. Since some of the
2204 # bots still run 2.6, have to grab the arguments manually.
2206 args_to_parse
= ['--profile-dir', '--browser']
2208 for arg_to_parse
in args_to_parse
:
2209 for i
, current_arg
in enumerate(command_args
):
2210 if arg_to_parse
in current_arg
:
2211 current_arg_split
= current_arg
.split('=')
2213 # Check 2 cases, --arg=<val> and --arg <val>
2214 if len(current_arg_split
) == 2:
2215 arg_dict
[arg_to_parse
] = current_arg_split
[1]
2216 elif i
+ 1 < len(command_args
):
2217 arg_dict
[arg_to_parse
] = command_args
[i
+1]
2219 path_to_generate
= os
.path
.join('tools', 'perf', 'generate_profile')
2221 if arg_dict
.has_key('--profile-dir') and arg_dict
.has_key('--browser'):
2222 profile_path
, profile_type
= os
.path
.split(arg_dict
['--profile-dir'])
2223 return not RunProcess(['python', path_to_generate
,
2224 '--profile-type-to-generate', profile_type
,
2225 '--browser', arg_dict
['--browser'], '--output-dir', profile_path
])
def _IsBisectModeUsingMetric(self):
  """Returns True when bisecting on a parsed metric (mean or std-dev)."""
  metric_modes = [BISECT_MODE_MEAN, BISECT_MODE_STD_DEV]
  return self.opts.bisect_mode in metric_modes
def _IsBisectModeReturnCode(self):
  """Returns True when bisecting on the test command's return code."""
  # Membership in a one-element list was needless indirection; a direct
  # equality check expresses the same condition.
  return self.opts.bisect_mode == BISECT_MODE_RETURN_CODE
def _IsBisectModeStandardDeviation(self):
  """Returns True when bisecting on the metric's standard deviation."""
  # Membership in a one-element list was needless indirection; a direct
  # equality check expresses the same condition.
  return self.opts.bisect_mode == BISECT_MODE_STD_DEV
def RunPerformanceTestAndParseResults(
    self, command_to_run, metric, reset_on_first_run=False,
    upload_on_last_run=False, results_label=None):
  """Runs a performance test on the current revision and parses the results.

  Args:
    command_to_run: The command to be run to execute the performance test.
    metric: The metric to parse out from the results of the performance test.
        This is the result chart name and trace name, separated by slash.
    reset_on_first_run: If True, pass the flag --reset-results on first run.
    upload_on_last_run: If True, pass the flag --upload-results on last run.
    results_label: A value for the option flag --results-label.
        The arguments reset_on_first_run, upload_on_last_run and results_label
        are all ignored if the test is not a Telemetry test.

  Returns:
    (values dict, 0) if --debug_ignore_perf_test was passed.
    (values dict, 0, test output) if the test was run successfully.
    (error message, -1) if the test couldn't be run.
    (error message, -1, test output) if the test ran but there was an error.
  """
  success_code, failure_code = 0, -1

  if self.opts.debug_ignore_perf_test:
    # NOTE(review): the lines building fake_results are missing here
    # (extraction gap) -- presumably a canned values dict; confirm upstream.
    return (fake_results, success_code)

  # For Windows platform set posix=False, to parse windows paths correctly.
  # On Windows, path separators '\' or '\\' are replace by '' when posix=True,
  # refer to http://bugs.python.org/issue1724822. By default posix=True.
  args = shlex.split(command_to_run, posix=not IsWindowsHost())

  if not self._GenerateProfileIfNecessary(args):
    err_text = 'Failed to generate profile for performance test.'
    return (err_text, failure_code)

  # If running a Telemetry test for Chrome OS, insert the remote IP and
  # identity parameters.
  is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
  if self.opts.target_platform == 'cros' and is_telemetry:
    args.append('--remote=%s' % self.opts.cros_remote_ip)
    args.append('--identity=%s' % CROS_TEST_KEY_PATH)

  start_time = time.time()

  # NOTE(review): the initialization of metric_values appears to be missing
  # here (extraction gap).
  output_of_all_runs = ''
  for i in xrange(self.opts.repeat_test_count):
    # Can ignore the return code since if the tests fail, it won't return 0.
    current_args = copy.copy(args)
    if i == 0 and reset_on_first_run:
      current_args.append('--reset-results')
    elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
      current_args.append('--upload-results')
    # NOTE(review): a guard (likely `if results_label:`) is missing here.
    current_args.append('--results-label=%s' % results_label)
    # NOTE(review): the `try:` statement, the tail of this call's argument
    # list, and the matching `except OSError` line are missing here
    # (extraction gap).
    (output, return_code) = RunProcessAndRetrieveOutput(current_args,
    if e.errno == errno.ENOENT:
      err_text = ('Something went wrong running the performance test. '
          'Please review the command line:\n\n')
      if 'src/' in ' '.join(args):
        err_text += ('Check that you haven\'t accidentally specified a '
            'path with src/ in the command.\n\n')
      err_text += ' '.join(args)

      return (err_text, failure_code)

    output_of_all_runs += output
    if self.opts.output_buildbot_annotations:
      # NOTE(review): the annotated echo of the test output is missing here.

    if self._IsBisectModeUsingMetric():
      metric_values += self.ParseMetricValuesFromOutput(metric, output)
      # If we're bisecting on a metric (ie, changes in the mean or
      # standard deviation) and no metric values are produced, bail out.
      if not metric_values:
        # NOTE(review): loop-exit statement (likely `break`) missing here.
    elif self._IsBisectModeReturnCode():
      metric_values.append(return_code)

    elapsed_minutes = (time.time() - start_time) / 60.0
    if elapsed_minutes >= self.opts.max_time_minutes:
      # NOTE(review): loop-exit statement (likely `break`) missing here.

  if len(metric_values) == 0:
    err_text = 'Metric %s was not found in the test output.' % metric
    # TODO(qyearsley): Consider also getting and displaying a list of metrics
    # that were found in the output here.
    return (err_text, failure_code, output_of_all_runs)

  # If we're bisecting on return codes, we're really just looking for zero vs
  # non-zero.
  if self._IsBisectModeReturnCode():
    # If any of the return codes is non-zero, output 1.
    overall_return_code = 0 if (
        all(current_value == 0 for current_value in metric_values)) else 1
    # NOTE(review): the opening of the values dict literal and several of
    # its entries are missing here (extraction gap).
        'mean': overall_return_code,
        'values': metric_values,

    print 'Results of performance test: Command returned with %d' % (
        overall_return_code)
  # NOTE(review): the `else:` introducing the metric branch is missing here.
    # Need to get the average value if there were multiple values.
    truncated_mean = CalculateTruncatedMean(metric_values,
        self.opts.truncate_percent)
    standard_err = CalculateStandardError(metric_values)
    standard_dev = CalculateStandardDeviation(metric_values)

    if self._IsBisectModeStandardDeviation():
      metric_values = [standard_dev]

    # NOTE(review): the opening of the values dict literal is missing here.
        'mean': truncated_mean,
        'std_err': standard_err,
        'std_dev': standard_dev,
        'values': metric_values,

    print 'Results of performance test: %12f %12f' % (
        truncated_mean, standard_err)
  return (values, success_code, output_of_all_runs)
def FindAllRevisionsToSync(self, revision, depot):
  """Finds all dependant revisions and depots that need to be synced for a
  given revision. This is only useful in the git workflow, as an svn depot
  may be split into multiple mirrors.

  ie. skia is broken up into 3 git mirrors over skia/src, skia/gyp, and
  skia/include. To sync skia/src properly, one has to find the proper
  revisions in skia/gyp and skia/include.

  Args:
    revision: The revision to sync to.
    depot: The depot in use at the moment (probably skia).

  Returns:
    A list of [depot, revision] pairs that need to be synced.
  """
  # The requested depot itself always syncs; dependants are appended below.
  revisions_to_sync = [[depot, revision]]

  is_base = ((depot == 'chromium') or (depot == 'cros') or
      (depot == 'android-chrome'))

  # Some SVN depots were split into multiple git depots, so we need to
  # figure out for each mirror which git revision to grab. There's no
  # guarantee that the SVN revision will exist for each of the dependant
  # depots, so we have to grep the git logs and grab the next earlier one.
  # NOTE(review): the head of this condition (likely `if not is_base and`)
  # is missing here (extraction gap).
      DEPOT_DEPS_NAME[depot]['depends'] and\
      self.source_control.IsGit():
    svn_rev = self.source_control.SVNFindRev(revision)

    for d in DEPOT_DEPS_NAME[depot]['depends']:
      self.ChangeToDepotWorkingDirectory(d)

      dependant_rev = self.source_control.ResolveToRevision(svn_rev, d, -1000)
      # NOTE(review): a guard (likely `if dependant_rev:`) is missing here.
      revisions_to_sync.append([d, dependant_rev])

    num_resolved = len(revisions_to_sync)
    num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])

    self.ChangeToDepotWorkingDirectory(depot)

    # First entry is the depot itself, so all dependants must have resolved
    # for the sync set to be complete.
    if not ((num_resolved - 1) == num_needed):
      # NOTE(review): the failure return (likely `return None`) is missing
      # here (extraction gap).

  return revisions_to_sync
2426 def PerformPreBuildCleanup(self
):
2427 """Performs necessary cleanup between runs."""
2428 print 'Cleaning up between runs.'
2431 # Having these pyc files around between runs can confuse the
2432 # perf tests and cause them to crash.
2433 for (path
, _
, files
) in os
.walk(self
.src_cwd
):
2434 for cur_file
in files
:
2435 if cur_file
.endswith('.pyc'):
2436 path_to_file
= os
.path
.join(path
, cur_file
)
2437 os
.remove(path_to_file
)
def PerformWebkitDirectoryCleanup(self, revision):
  """If the script is switching between Blink and WebKit during bisect,
  its faster to just delete the directory rather than leave it up to git
  to sync the change.

  Returns:
    True if successful.
  """
  if not self.source_control.CheckoutFileAtRevision(
      bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
    # NOTE(review): the failure return (likely `return False`) is missing
    # here (extraction gap).

  # NOTE(review): a line saving the current working directory appears to be
  # missing here (extraction gap).
  os.chdir(self.src_cwd)

  is_blink = bisect_utils.IsDepsFileBlink()

  # NOTE(review): line(s) restoring the previous working directory appear
  # to be missing here (extraction gap).
  if not self.source_control.RevertFileToHead(
      bisect_utils.FILE_DEPS_GIT):
    # NOTE(review): the failure return (likely `return False`) is missing
    # here (extraction gap).

  if self.was_blink != is_blink:
    self.was_blink = is_blink
    # Removes third_party/Webkit directory.
    return bisect_utils.RemoveThirdPartyDirectory('Webkit')
  # NOTE(review): the success return (likely `return True`) is missing
  # here (extraction gap).
def PerformCrosChrootCleanup(self):
  """Deletes the chroot.

  Returns:
    True if the delete command succeeded.
  """
  self.ChangeToDepotWorkingDirectory('cros')
  delete_cmd = [CROS_SDK_PATH, '--delete']
  exit_code = RunProcess(delete_cmd)
  # A zero exit code means success.
  return not exit_code
def CreateCrosChroot(self):
  """Creates a new chroot.

  Returns:
    True if the create command succeeded.
  """
  self.ChangeToDepotWorkingDirectory('cros')
  create_cmd = [CROS_SDK_PATH, '--create']
  exit_code = RunProcess(create_cmd)
  # A zero exit code means success.
  return not exit_code
def PerformPreSyncCleanup(self, revision, depot):
  """Performs any necessary cleanup before syncing.

  Args:
    revision: The revision being synced to.
    depot: The depot being bisected.

  Returns:
    True if successful.
  """
  if depot == 'chromium' or depot == 'android-chrome':
    # Removes third_party/libjingle. At some point, libjingle was causing
    # issues syncing when using the git workflow (crbug.com/266324).
    os.chdir(self.src_cwd)
    if not bisect_utils.RemoveThirdPartyDirectory('libjingle'):
      # (Failure return restored -- the extracted source dropped the body
      # of this dangling `if not ...:` guard.)
      return False
    # Removes third_party/skia. At some point, skia was causing
    # issues syncing when using the git workflow (crbug.com/377951).
    if not bisect_utils.RemoveThirdPartyDirectory('skia'):
      return False
  if depot == 'chromium':
    # The fast webkit cleanup doesn't work for android_chrome
    # The switch from Webkit to Blink that this deals with now happened
    # quite a long time ago so this is unlikely to be a problem.
    return self.PerformWebkitDirectoryCleanup(revision)
  elif depot == 'cros':
    return self.PerformCrosChrootCleanup()
  return True
def RunPostSync(self, depot):
  """Performs any work after syncing.

  Returns:
    True if successful.
  """
  if self.opts.target_platform == 'android':
    if not bisect_utils.SetupAndroidBuildEnvironment(self.opts,
        path_to_src=self.src_cwd):
      # NOTE(review): the failure return (likely `return False`) is missing
      # here (extraction gap).

  # NOTE(review): the condition selecting the CrOS branch (presumably
  # testing for the cros depot/platform) is missing here (extraction gap);
  # confirm against upstream before relying on this control flow.
  return self.CreateCrosChroot()

  return self.RunGClientHooks()
def ShouldSkipRevision(self, depot, revision):
  """Some commits can be safely skipped (such as a DEPS roll), since the tool
  is git based those changes would have no effect.

  Args:
    depot: The depot being bisected.
    revision: Current revision we're synced to.

  Returns:
    True if we should skip building/testing this revision.
  """
  if depot == 'chromium':
    if self.source_control.IsGit():
      cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
      output = CheckRunGit(cmd)

      files = output.splitlines()

      # A commit that only touches DEPS has no effect on a git checkout,
      # so it is safe to skip. (The True/False returns were dropped in the
      # extracted source; restored per the docstring contract.)
      if len(files) == 1 and files[0] == 'DEPS':
        return True

  return False
def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
    # NOTE(review): the final parameter line is missing here (extraction
    # gap); the body reads a `skippable` flag, presumably `skippable=False):`.
  """Performs a full sync/build/run of the specified revision.

  Args:
    revision: The revision to sync to.
    depot: The depot that's being used at the moment (src, webkit, etc.)
    command_to_run: The command to execute the performance test.
    metric: The performance metric being tested.

  Returns:
    On success, a tuple containing the results of the performance test.
    Otherwise, a tuple with the error message.
  """
  # Pick the sync tool matching the depot's layout.
  if depot == 'chromium' or depot == 'android-chrome':
    sync_client = 'gclient'
  elif depot == 'cros':
    sync_client = 'repo'

  revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)

  if not revisions_to_sync:
    return ('Failed to resolve dependant depots.', BUILD_RESULT_FAIL)

  if not self.PerformPreSyncCleanup(revision, depot):
    return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)

  # NOTE(review): initialization of the sync success flag appears to be
  # missing here (extraction gap).
  if not self.opts.debug_ignore_sync:
    for r in revisions_to_sync:
      self.ChangeToDepotWorkingDirectory(r[0])

      # NOTE(review): one or more lines are missing here (extraction gap).
      self.PerformPreBuildCleanup()

      # If you're using gclient to sync, you need to specify the depot you
      # want so that all the dependencies sync properly as well.
      # ie. gclient sync src@<SHA1>
      current_revision = r[1]
      if sync_client == 'gclient':
        current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
            # NOTE(review): the format argument and closing parenthesis are
            # missing here (extraction gap).
      if not self.source_control.SyncToRevision(current_revision,
          # NOTE(review): the remaining arguments and the failure handling
          # for this sync call are missing here (extraction gap).

  success = self.RunPostSync(depot)
  # NOTE(review): a guard on `success` around the block below appears to be
  # missing here (extraction gap).
  if skippable and self.ShouldSkipRevision(depot, revision):
    return ('Skipped revision: [%s]' % str(revision),
        BUILD_RESULT_SKIPPED)

  start_build_time = time.time()
  if self.BuildCurrentRevision(depot, revision):
    after_build_time = time.time()
    results = self.RunPerformanceTestAndParseResults(command_to_run,
        # NOTE(review): the remaining call arguments are missing here.
    # Restore build output directory once the tests are done, to avoid
    # operating on a downloaded build's output directory.
    if self.IsDownloadable(depot) and revision:
      self.BackupOrRestoreOutputdirectory(restore=True)

    # NOTE(review): a success check on `results` appears to be missing here.
    external_revisions = self.Get3rdPartyRevisionsFromCurrentRevision(
        # NOTE(review): the call arguments are missing here.

    if not external_revisions is None:
      return (results[0], results[1], external_revisions,
          time.time() - after_build_time, after_build_time -
          # NOTE(review): the tail of this expression is missing here.
      return ('Failed to parse DEPS file for external revisions.',
          # NOTE(review): the branches separating the error returns below
          # are missing here (extraction gaps).
  return ('Failed to build revision: [%s]' % (str(revision, )),
  return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
  return ('Failed to sync revision: [%s]' % (str(revision, )),
def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
  """Given known good and bad values, decide if the current_value passed
  or failed.

  Args:
    current_value: The value of the metric being checked.
    known_good_value: The reference value for a "passed" run.
    known_bad_value: The reference value for a "failed" run.

  Returns:
    True if the current_value is closer to the known_good_value than the
    known_bad_value.
  """
  if self.opts.bisect_mode == BISECT_MODE_STD_DEV:
    dist_to_good_value = abs(current_value['std_dev'] -
        known_good_value['std_dev'])
    dist_to_bad_value = abs(current_value['std_dev'] -
        known_bad_value['std_dev'])
  else:
    # (The `else:` was dropped in the extracted source; without it the
    # mean-based distances would unconditionally clobber the std-dev ones,
    # making the first branch dead code.)
    dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
    dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])

  return dist_to_good_value < dist_to_bad_value
2672 def _GetDepotDirectory(self
, depot_name
):
2673 if depot_name
== 'chromium':
2675 elif depot_name
== 'cros':
2676 return self
.cros_cwd
2677 elif depot_name
in DEPOT_NAMES
:
2678 return self
.depot_cwd
[depot_name
]
2680 assert False, 'Unknown depot [ %s ] encountered. Possibly a new one'\
2681 ' was added without proper support?' % depot_name
def ChangeToDepotWorkingDirectory(self, depot_name):
  """Given a depot, changes to the appropriate working directory.

  Args:
    depot_name: The name of the depot (see DEPOT_NAMES).
  """
  target_directory = self._GetDepotDirectory(depot_name)
  os.chdir(target_directory)
def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
  # Maps the range endpoints' trunk revisions to their nearest
  # bleeding_edge equivalents and records them in each endpoint's
  # 'external' dict, searching forward for the minimum and backward for
  # the maximum so the expanded range still covers the original one.
  r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'],
      search_forward=True)
  r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'],
      search_forward=False)
  min_revision_data['external']['v8_bleeding_edge'] = r1
  max_revision_data['external']['v8_bleeding_edge'] = r2

  # Warn when either endpoint had no direct trunk -> bleeding_edge mapping,
  # since the widened range may be imprecise.
  if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
      min_revision_data['revision']) or
      not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
      max_revision_data['revision'])):
    self.warnings.append('Trunk revisions in V8 did not map directly to '
        'bleeding_edge. Attempted to expand the range to find V8 rolls which '
        'did map directly to bleeding_edge revisions, but results might not '
        # NOTE(review): the closing fragment of this warning string and the
        # closing parenthesis are missing here (extraction gap).
def _FindNextDepotToBisect(self, current_depot, current_revision,
    min_revision_data, max_revision_data):
  """Given the state of the bisect, decides which depot the script should
  dive into next (if any).

  Args:
    current_depot: Current depot being bisected.
    current_revision: Current revision synced to.
    min_revision_data: Data about the earliest revision in the bisect range.
    max_revision_data: Data about the latest revision in the bisect range.

  Returns:
    The depot to bisect next, or None.
  """
  external_depot = None
  for next_depot in DEPOT_NAMES:
    # Skip depots pinned to a different platform.
    if DEPOT_DEPS_NAME[next_depot].has_key('platform'):
      if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
        # NOTE(review): loop-continue statement missing here
        # (extraction gap).

    # Only consider depots flagged for recursion that hang off the depot
    # currently being bisected.
    if not (DEPOT_DEPS_NAME[next_depot]["recurse"] and
        min_revision_data['depot'] in DEPOT_DEPS_NAME[next_depot]['from']):
      # NOTE(review): loop-continue statement missing here (extraction gap).

    if current_depot == 'v8':
      # We grab the bleeding_edge info here rather than earlier because we
      # finally have the revision range. From that we can search forwards and
      # backwards to try to match trunk revisions to bleeding_edge.
      self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)

    # If the external depot did not change across the range, there is
    # nothing to bisect there.
    if (min_revision_data['external'].get(next_depot) ==
        max_revision_data['external'].get(next_depot)):
      # NOTE(review): loop-continue statement missing here (extraction gap).

    if (min_revision_data['external'].get(next_depot) and
        max_revision_data['external'].get(next_depot)):
      external_depot = next_depot
      # NOTE(review): loop-exit statement (likely `break`) missing here.

  return external_depot
def PrepareToBisectOnDepot(self,
    # NOTE(review): the parameter-list continuation is missing here
    # (extraction gap); the body uses current_depot and previous_revision,
    # and the docstring also names end_revision, start_revision and
    # previous_depot.
  """Changes to the appropriate directory and gathers a list of revisions
  to bisect between |start_revision| and |end_revision|.

  Args:
    current_depot: The depot we want to bisect.
    end_revision: End of the revision range.
    start_revision: Start of the revision range.
    previous_depot: The depot we were previously bisecting.
    previous_revision: The last revision we synced to on |previous_depot|.

  Returns:
    A list containing the revisions between |start_revision| and
    |end_revision| inclusive.
  """
  # Change into working directory of external library to run
  # subsequent commands.
  self.ChangeToDepotWorkingDirectory(current_depot)

  # V8 (and possibly others) is merged in periodically. Bisecting
  # this directory directly won't give much good info.
  if DEPOT_DEPS_NAME[current_depot].has_key('custom_deps'):
    config_path = os.path.join(self.src_cwd, '..')
    if bisect_utils.RunGClientAndCreateConfig(self.opts,
        DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
      # NOTE(review): failure return missing here (extraction gap).
    if bisect_utils.RunGClient(
        ['sync', '--revision', previous_revision], cwd=self.src_cwd):
      # NOTE(review): failure return missing here (extraction gap).

  if current_depot == 'v8_bleeding_edge':
    self.ChangeToDepotWorkingDirectory('chromium')
    # Swap the v8 checkout for bleeding_edge, remembering how to undo the
    # swap once the bisect is over.
    shutil.move('v8', 'v8.bak')
    shutil.move('v8_bleeding_edge', 'v8')

    self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
    self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])

    self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
    self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')

    self.ChangeToDepotWorkingDirectory(current_depot)

  depot_revision_list = self.GetRevisionList(current_depot,
      # NOTE(review): the remaining arguments to GetRevisionList are missing
      # here (extraction gap).

  self.ChangeToDepotWorkingDirectory('chromium')

  return depot_revision_list
def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
  """Gathers reference values by running the performance tests on the
  known good and bad revisions.

  Args:
    good_rev: The last known good revision where the performance regression
      has not occurred yet.
    bad_rev: A revision where the performance regression has already occurred.
    cmd: The command to execute the performance test.
    metric: The metric being tested for regression.
    target_depot: The depot the revisions belong to.

  Returns:
    A tuple with the results of building and running each revision.
  """
  # The bad revision runs first; its result gates whether the good revision
  # is attempted at all.
  bad_run_results = self.SyncBuildAndRunRevision(bad_rev,
      # NOTE(review): the remaining call arguments are missing here
      # (extraction gap).

  good_run_results = None

  # Index 1 holds the status code; a falsy value means the bad run worked.
  if not bad_run_results[1]:
    good_run_results = self.SyncBuildAndRunRevision(good_rev,
        # NOTE(review): the remaining call arguments are missing here
        # (extraction gap).

  return (bad_run_results, good_run_results)
def AddRevisionsIntoRevisionData(self, revisions, depot, sort, revision_data):
  """Adds new revisions to the revision_data dict and initializes them.

  Args:
    revisions: List of revisions to add.
    depot: Depot that's currently in use (src, webkit, etc...)
    sort: Sorting key for displaying revisions.
    revision_data: A dict to add the new revisions into. Existing revisions
      will have their sort keys offset.
  """
  num_depot_revisions = len(revisions)

  # Shift the sort keys of everything after the insertion point to make
  # room for the new entries.
  for _, v in revision_data.iteritems():
    if v['sort'] > sort:
      v['sort'] += num_depot_revisions

  for i in xrange(num_depot_revisions):
    # NOTE(review): the line binding `r` (likely `r = revisions[i]`) is
    # missing here (extraction gap).
    revision_data[r] = {'revision' : r,
        # NOTE(review): several dict entries are missing here (extraction
        # gap) -- likely the 'depot'/'value'/'passed' and timing fields used
        # by the entries built in Run().
        'sort' : i + sort + 1}
2863 def PrintRevisionsToBisectMessage(self
, revision_list
, depot
):
2864 if self
.opts
.output_buildbot_annotations
:
2865 step_name
= 'Bisection Range: [%s - %s]' % (
2866 revision_list
[len(revision_list
)-1], revision_list
[0])
2867 bisect_utils
.OutputAnnotationStepStart(step_name
)
2870 print 'Revisions to bisect on [%s]:' % depot
2871 for revision_id
in revision_list
:
2872 print ' -> %s' % (revision_id
, )
2875 if self
.opts
.output_buildbot_annotations
:
2876 bisect_utils
.OutputAnnotationStepClosed()
def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
  """Checks to see if changes to DEPS file occurred, and that the revision
  range also includes the change to .DEPS.git. If it doesn't, attempts to
  expand the revision range to include it.

  Args:
    bad_revision: First known bad revision.
    good_revision: Last known good revision.

  Returns:
    A tuple with the new bad and good revisions.
  """
  if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
    changes_to_deps = self.source_control.QueryFileRevisionHistory(
        'DEPS', good_revision, bad_revision)

    # NOTE(review): a guard (likely `if changes_to_deps:`) is missing here
    # (extraction gap).
    # DEPS file was changed, search from the oldest change to DEPS file to
    # bad_revision to see if there are matching .DEPS.git changes.
    oldest_deps_change = changes_to_deps[-1]
    changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
        bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)

    if len(changes_to_deps) != len(changes_to_gitdeps):
      # Grab the timestamp of the last DEPS change
      cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
      output = CheckRunGit(cmd)
      commit_time = int(output)

      # Try looking for a commit that touches the .DEPS.git file in the
      # next 15 minutes after the DEPS file change.
      cmd = ['log', '--format=%H', '-1',
          '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
          'origin/master', bisect_utils.FILE_DEPS_GIT]
      output = CheckRunGit(cmd)
      output = output.strip()
      # NOTE(review): a branch on whether a matching commit was found
      # appears to be missing here (extraction gap).
      self.warnings.append('Detected change to DEPS and modified '
          'revision range to include change to .DEPS.git')
      return (output, good_revision)

      self.warnings.append('Detected change to DEPS but couldn\'t find '
          'matching change to .DEPS.git')
  return (bad_revision, good_revision)
def CheckIfRevisionsInProperOrder(self,
                                  target_depot,
                                  good_revision,
                                  bad_revision):
  """Checks that |good_revision| is an earlier revision than |bad_revision|.

  (The parameter lines were lost in the extracted source; reconstructed
  from the call site in Run(), which passes target_depot, good_revision,
  bad_revision.)

  Args:
    target_depot: The depot the revisions belong to.
    good_revision: Number/tag of the known good revision.
    bad_revision: Number/tag of the known bad revision.

  Returns:
    True if the revisions are in the proper order (good earlier than bad).
  """
  if self.source_control.IsGit() and target_depot != 'cros':
    # Compare commit timestamps from git to order arbitrary hashes.
    cmd = ['log', '--format=%ct', '-1', good_revision]
    cwd = self._GetDepotDirectory(target_depot)

    output = CheckRunGit(cmd, cwd=cwd)
    good_commit_time = int(output)

    cmd = ['log', '--format=%ct', '-1', bad_revision]
    output = CheckRunGit(cmd, cwd=cwd)
    bad_commit_time = int(output)

    return good_commit_time <= bad_commit_time

  # Cros/svn use integers
  return int(good_revision) <= int(bad_revision)
2952 def Run(self
, command_to_run
, bad_revision_in
, good_revision_in
, metric
):
2953 """Given known good and bad revisions, run a binary search on all
2954 intermediate revisions to determine the CL where the performance regression
2958 command_to_run: Specify the command to execute the performance test.
2959 good_revision: Number/tag of the known good revision.
2960 bad_revision: Number/tag of the known bad revision.
2961 metric: The performance metric to monitor.
2964 A dict with 2 members, 'revision_data' and 'error'. On success,
2965 'revision_data' will contain a dict mapping revision ids to
2966 data about that revision. Each piece of revision data consists of a
2967 dict with the following keys:
2969 'passed': Represents whether the performance test was successful at
2970 that revision. Possible values include: 1 (passed), 0 (failed),
2971 '?' (skipped), 'F' (build failed).
2972 'depot': The depot that this revision is from (ie. WebKit)
2973 'external': If the revision is a 'src' revision, 'external' contains
2974 the revisions of each of the external libraries.
2975 'sort': A sort value for sorting the dict in order of commits.
2992 If an error occurred, the 'error' field will contain the message and
2993 'revision_data' will be empty.
2995 results
= {'revision_data' : {},
2998 # Choose depot to bisect first
2999 target_depot
= 'chromium'
3000 if self
.opts
.target_platform
== 'cros':
3001 target_depot
= 'cros'
3002 elif self
.opts
.target_platform
== 'android-chrome':
3003 target_depot
= 'android-chrome'
3006 self
.ChangeToDepotWorkingDirectory(target_depot
)
3008 # If they passed SVN CL's, etc... we can try match them to git SHA1's.
3009 bad_revision
= self
.source_control
.ResolveToRevision(bad_revision_in
,
3011 good_revision
= self
.source_control
.ResolveToRevision(good_revision_in
,
3017 if bad_revision
is None:
3018 results
['error'] = 'Could\'t resolve [%s] to SHA1.' % (bad_revision_in
,)
3021 if good_revision
is None:
3022 results
['error'] = 'Could\'t resolve [%s] to SHA1.' % (good_revision_in
,)
3025 # Check that they didn't accidentally swap good and bad revisions.
3026 if not self
.CheckIfRevisionsInProperOrder(
3027 target_depot
, good_revision
, bad_revision
):
3028 results
['error'] = 'bad_revision < good_revision, did you swap these '\
3032 (bad_revision
, good_revision
) = self
.NudgeRevisionsIfDEPSChange(
3033 bad_revision
, good_revision
)
3035 if self
.opts
.output_buildbot_annotations
:
3036 bisect_utils
.OutputAnnotationStepStart('Gathering Revisions')
3038 print 'Gathering revision range for bisection.'
3039 # Retrieve a list of revisions to do bisection on.
3040 src_revision_list
= self
.GetRevisionList(target_depot
,
3044 if self
.opts
.output_buildbot_annotations
:
3045 bisect_utils
.OutputAnnotationStepClosed()
3047 if src_revision_list
:
3048 # revision_data will store information about a revision such as the
3049 # depot it came from, the webkit/V8 revision at that time,
3050 # performance timing, build state, etc...
3051 revision_data
= results
['revision_data']
3053 # revision_list is the list we're binary searching through at the moment.
3058 for current_revision_id
in src_revision_list
:
3061 revision_data
[current_revision_id
] = {'value' : None,
3063 'depot' : target_depot
,
3067 'sort' : sort_key_ids
}
3068 revision_list
.append(current_revision_id
)
3071 max_revision
= len(revision_list
) - 1
3073 self
.PrintRevisionsToBisectMessage(revision_list
, target_depot
)
3075 if self
.opts
.output_buildbot_annotations
:
3076 bisect_utils
.OutputAnnotationStepStart('Gathering Reference Values')
3078 print 'Gathering reference values for bisection.'
3080 # Perform the performance tests on the good and bad revisions, to get
3082 (bad_results
, good_results
) = self
.GatherReferenceValues(good_revision
,
3088 if self
.opts
.output_buildbot_annotations
:
3089 bisect_utils
.OutputAnnotationStepClosed()
3092 results
['error'] = ('An error occurred while building and running '
3093 'the \'bad\' reference value. The bisect cannot continue without '
3094 'a working \'bad\' revision to start from.\n\nError: %s' %
3099 results
['error'] = ('An error occurred while building and running '
3100 'the \'good\' reference value. The bisect cannot continue without '
3101 'a working \'good\' revision to start from.\n\nError: %s' %
3106 # We need these reference values to determine if later runs should be
3107 # classified as pass or fail.
3108 known_bad_value
= bad_results
[0]
3109 known_good_value
= good_results
[0]
3111 # Can just mark the good and bad revisions explicitly here since we
3112 # already know the results.
3113 bad_revision_data
= revision_data
[revision_list
[0]]
3114 bad_revision_data
['external'] = bad_results
[2]
3115 bad_revision_data
['perf_time'] = bad_results
[3]
3116 bad_revision_data
['build_time'] = bad_results
[4]
3117 bad_revision_data
['passed'] = False
3118 bad_revision_data
['value'] = known_bad_value
3120 good_revision_data
= revision_data
[revision_list
[max_revision
]]
3121 good_revision_data
['external'] = good_results
[2]
3122 good_revision_data
['perf_time'] = good_results
[3]
3123 good_revision_data
['build_time'] = good_results
[4]
3124 good_revision_data
['passed'] = True
3125 good_revision_data
['value'] = known_good_value
3127 next_revision_depot
= target_depot
3130 if not revision_list
:
3133 min_revision_data
= revision_data
[revision_list
[min_revision
]]
3134 max_revision_data
= revision_data
[revision_list
[max_revision
]]
3136 if max_revision
- min_revision
<= 1:
3137 current_depot
= min_revision_data
['depot']
3138 if min_revision_data
['passed'] == '?':
3139 next_revision_index
= min_revision
3140 elif max_revision_data
['passed'] == '?':
3141 next_revision_index
= max_revision
3142 elif current_depot
in ['android-chrome', 'cros', 'chromium', 'v8']:
3143 previous_revision
= revision_list
[min_revision
]
3144 # If there were changes to any of the external libraries we track,
3145 # should bisect the changes there as well.
3146 external_depot
= self
._FindNextDepotToBisect
(current_depot
,
3147 previous_revision
, min_revision_data
, max_revision_data
)
3149 # If there was no change in any of the external depots, the search
3151 if not external_depot
:
3152 if current_depot
== 'v8':
3153 self
.warnings
.append('Unfortunately, V8 bisection couldn\'t '
3154 'continue any further. The script can only bisect into '
3155 'V8\'s bleeding_edge repository if both the current and '
3156 'previous revisions in trunk map directly to revisions in '
3160 earliest_revision
= max_revision_data
['external'][external_depot
]
3161 latest_revision
= min_revision_data
['external'][external_depot
]
3163 new_revision_list
= self
.PrepareToBisectOnDepot(external_depot
,
3166 next_revision_depot
,
3169 if not new_revision_list
:
3170 results
['error'] = 'An error occurred attempting to retrieve'\
3171 ' revision range: [%s..%s]' % \
3172 (earliest_revision
, latest_revision
)
3175 self
.AddRevisionsIntoRevisionData(new_revision_list
,
3177 min_revision_data
['sort'],
3180 # Reset the bisection and perform it on the newly inserted
3182 revision_list
= new_revision_list
3184 max_revision
= len(revision_list
) - 1
3185 sort_key_ids
+= len(revision_list
)
3187 print 'Regression in metric:%s appears to be the result of changes'\
3188 ' in [%s].' % (metric
, external_depot
)
3190 self
.PrintRevisionsToBisectMessage(revision_list
, external_depot
)
3196 next_revision_index
= int((max_revision
- min_revision
) / 2) +\
3199 next_revision_id
= revision_list
[next_revision_index
]
3200 next_revision_data
= revision_data
[next_revision_id
]
3201 next_revision_depot
= next_revision_data
['depot']
3203 self
.ChangeToDepotWorkingDirectory(next_revision_depot
)
3205 if self
.opts
.output_buildbot_annotations
:
3206 step_name
= 'Working on [%s]' % next_revision_id
3207 bisect_utils
.OutputAnnotationStepStart(step_name
)
3209 print 'Working on revision: [%s]' % next_revision_id
3211 run_results
= self
.SyncBuildAndRunRevision(next_revision_id
,
3212 next_revision_depot
,
3214 metric
, skippable
=True)
3216 # If the build is successful, check whether or not the metric
3218 if not run_results
[1]:
3219 if len(run_results
) > 2:
3220 next_revision_data
['external'] = run_results
[2]
3221 next_revision_data
['perf_time'] = run_results
[3]
3222 next_revision_data
['build_time'] = run_results
[4]
3224 passed_regression
= self
._CheckIfRunPassed
(run_results
[0],
3228 next_revision_data
['passed'] = passed_regression
3229 next_revision_data
['value'] = run_results
[0]
3231 if passed_regression
:
3232 max_revision
= next_revision_index
3234 min_revision
= next_revision_index
3236 if run_results
[1] == BUILD_RESULT_SKIPPED
:
3237 next_revision_data
['passed'] = 'Skipped'
3238 elif run_results
[1] == BUILD_RESULT_FAIL
:
3239 next_revision_data
['passed'] = 'Build Failed'
3241 print run_results
[0]
3243 # If the build is broken, remove it and redo search.
3244 revision_list
.pop(next_revision_index
)
3248 if self
.opts
.output_buildbot_annotations
:
3249 self
._PrintPartialResults
(results
)
3250 bisect_utils
.OutputAnnotationStepClosed()
3252 # Weren't able to sync and retrieve the revision range.
3253 results
['error'] = 'An error occurred attempting to retrieve revision '\
3254 'range: [%s..%s]' % (good_revision
, bad_revision
)
def _PrintPartialResults(self, results_dict):
  """Prints a partial results table for the revisions tested so far.

  Used mid-bisection (buildbot annotation output) to show progress; the
  confidence column is pinned at 100 and final-step formatting is disabled.

  Args:
    results_dict: Dict containing a 'revision_data' mapping for the bisect.
  """
  revision_data = results_dict['revision_data']
  # Render rows in bisection order, i.e. by each entry's 'sort' key.
  order_key = lambda entry: entry[1]['sort']
  revision_data_sorted = sorted(revision_data.iteritems(), key=order_key)
  results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
  self._PrintTestedCommitsTable(
      revision_data_sorted,
      results_dict['first_working_revision'],
      results_dict['last_broken_revision'],
      100,
      final_step=False)
def _PrintConfidence(self, results_dict):
  """Prints the confidence level of the bisect results.

  Args:
    results_dict: Results dict with a numeric 'confidence' entry,
        formatted as an integer percentage.
  """
  # The perf dashboard specifically looks for the string
  # "Confidence in Bisection Results: 100%" to decide whether or not
  # to cc the author(s). If you change this, please update the perf
  # dashboard as well.
  print 'Confidence in Bisection Results: %d%%' % results_dict['confidence']
3276 def _ConfidenceLevelStatus(self
, results_dict
):
3277 if not results_dict
['confidence']:
3279 confidence_status
= 'Successful with %(level)s confidence%(warning)s.'
3280 if results_dict
['confidence'] >= 95:
3284 warning
= ' and warnings'
3285 if not self
.warnings
:
3287 return confidence_status
% {'level': level
, 'warning': warning
}
def _PrintThankYou(self):
  """Prints the canned thank-you message (module-level RESULTS_THANKYOU)."""
  print RESULTS_THANKYOU
3292 def _PrintBanner(self
, results_dict
):
3293 if self
._IsBisectModeReturnCode
():
3297 metrics
= '/'.join(self
.opts
.metric
)
3298 change
= '%.02f%% (+/-%.02f%%)' % (
3299 results_dict
['regression_size'], results_dict
['regression_std_err'])
3301 if results_dict
['culprit_revisions'] and results_dict
['confidence']:
3302 status
= self
._ConfidenceLevelStatus
(results_dict
)
3304 status
= 'Failure, could not reproduce.'
3305 change
= 'Bisect could not reproduce a change.'
3307 print RESULTS_BANNER
% {
3309 'command': self
.opts
.command
,
3312 'confidence': results_dict
['confidence'],
3316 def _PrintFailedBanner(self
, results_dict
):
3318 if self
._IsBisectModeReturnCode
():
3319 print 'Bisect could not reproduce a change in the return code.'
3321 print ('Bisect could not reproduce a change in the '
3322 '%s metric.' % '/'.join(self
.opts
.metric
))
3325 def _GetViewVCLinkFromDepotAndHash(self
, cl
, depot
):
3326 info
= self
.source_control
.QueryRevisionInfo(cl
,
3327 self
._GetDepotDirectory
(depot
))
3328 if depot
and DEPOT_DEPS_NAME
[depot
].has_key('viewvc'):
3330 # Format is "git-svn-id: svn://....@123456 <other data>"
3331 svn_line
= [i
for i
in info
['body'].splitlines() if 'git-svn-id:' in i
]
3332 svn_revision
= svn_line
[0].split('@')
3333 svn_revision
= svn_revision
[1].split(' ')[0]
3334 return DEPOT_DEPS_NAME
[depot
]['viewvc'] + svn_revision
3339 def _PrintRevisionInfo(self
, cl
, info
, depot
=None):
3341 if not info
['email'].startswith(info
['author']):
3342 email_info
= '\nEmail : %s' % info
['email']
3343 commit_link
= self
._GetViewVCLinkFromDepotAndHash
(cl
, depot
)
3345 commit_info
= '\nLink : %s' % commit_link
3347 commit_info
= ('\nFailed to parse svn revision from body:\n%s' %
3349 print RESULTS_REVISION_INFO
% {
3350 'subject': info
['subject'],
3351 'author': info
['author'],
3352 'email_info': email_info
,
3353 'commit_info': commit_info
,
3354 'cl_date': info
['date']
3357 def _PrintTableRow(self
, column_widths
, row_data
):
3358 assert len(column_widths
) == len(row_data
)
3361 for i
in xrange(len(column_widths
)):
3362 current_row_data
= row_data
[i
].center(column_widths
[i
], ' ')
3363 text
+= ('%%%ds' % column_widths
[i
]) % current_row_data
3366 def _PrintTestedCommitsHeader(self
):
3367 if self
.opts
.bisect_mode
== BISECT_MODE_MEAN
:
3368 self
._PrintTableRow
(
3369 [20, 70, 14, 12, 13],
3370 ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State'])
3371 elif self
.opts
.bisect_mode
== BISECT_MODE_STD_DEV
:
3372 self
._PrintTableRow
(
3373 [20, 70, 14, 12, 13],
3374 ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State'])
3375 elif self
.opts
.bisect_mode
== BISECT_MODE_RETURN_CODE
:
3376 self
._PrintTableRow
(
3378 ['Depot', 'Commit SHA', 'Return Code', 'State'])
3380 assert False, "Invalid bisect_mode specified."
3381 print ' %20s %70s %14s %13s' % ('Depot'.center(20, ' '),
3382 'Commit SHA'.center(70, ' '), 'Return Code'.center(14, ' '),
3383 'State'.center(13, ' '))
3385 def _PrintTestedCommitsEntry(self
, current_data
, cl_link
, state_str
):
3386 if self
.opts
.bisect_mode
== BISECT_MODE_MEAN
:
3387 std_error
= '+-%.02f' % current_data
['value']['std_err']
3388 mean
= '%.02f' % current_data
['value']['mean']
3389 self
._PrintTableRow
(
3390 [20, 70, 12, 14, 13],
3391 [current_data
['depot'], cl_link
, mean
, std_error
, state_str
])
3392 elif self
.opts
.bisect_mode
== BISECT_MODE_STD_DEV
:
3393 std_error
= '+-%.02f' % current_data
['value']['std_err']
3394 mean
= '%.02f' % current_data
['value']['mean']
3395 self
._PrintTableRow
(
3396 [20, 70, 12, 14, 13],
3397 [current_data
['depot'], cl_link
, std_error
, mean
, state_str
])
3398 elif self
.opts
.bisect_mode
== BISECT_MODE_RETURN_CODE
:
3399 mean
= '%d' % current_data
['value']['mean']
3400 self
._PrintTableRow
(
3402 [current_data
['depot'], cl_link
, mean
, state_str
])
3404 def _PrintTestedCommitsTable(self
, revision_data_sorted
,
3405 first_working_revision
, last_broken_revision
, confidence
,
3409 print '===== TESTED COMMITS ====='
3411 print '===== PARTIAL RESULTS ====='
3412 self
._PrintTestedCommitsHeader
()
3414 for current_id
, current_data
in revision_data_sorted
:
3415 if current_data
['value']:
3416 if (current_id
== last_broken_revision
or
3417 current_id
== first_working_revision
):
3418 # If confidence is too low, don't add this empty line since it's
3419 # used to put focus on a suspected CL.
3420 if confidence
and final_step
:
3423 if state
== 2 and not final_step
:
3424 # Just want a separation between "bad" and "good" cl's.
3428 if state
== 1 and final_step
:
3429 state_str
= 'Suspected CL'
3433 # If confidence is too low, don't bother outputting good/bad.
3436 state_str
= state_str
.center(13, ' ')
3438 cl_link
= self
._GetViewVCLinkFromDepotAndHash
(current_id
,
3439 current_data
['depot'])
3441 cl_link
= current_id
3442 self
._PrintTestedCommitsEntry
(current_data
, cl_link
, state_str
)
3444 def _PrintReproSteps(self
):
3445 command
= '$ ' + self
.opts
.command
3446 if bisect_utils
.IsTelemetryCommand(self
.opts
.command
):
3447 command
+= ('\nAlso consider passing --profiler=list to see available '
3449 print REPRO_STEPS_LOCAL
% {'command': command
}
3450 print REPRO_STEPS_TRYJOB
% {'command': command
}
3452 def _PrintOtherRegressions(self
, other_regressions
, revision_data
):
3454 print 'Other regressions may have occurred:'
3455 print ' %8s %70s %10s' % ('Depot'.center(8, ' '),
3456 'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
3457 for regression
in other_regressions
:
3458 current_id
, previous_id
, confidence
= regression
3459 current_data
= revision_data
[current_id
]
3460 previous_data
= revision_data
[previous_id
]
3462 current_link
= self
._GetViewVCLinkFromDepotAndHash
(current_id
,
3463 current_data
['depot'])
3464 previous_link
= self
._GetViewVCLinkFromDepotAndHash
(previous_id
,
3465 previous_data
['depot'])
3467 # If we can't map it to a viewable URL, at least show the original hash.
3468 if not current_link
:
3469 current_link
= current_id
3470 if not previous_link
:
3471 previous_link
= previous_id
3473 print ' %8s %70s %s' % (
3474 current_data
['depot'], current_link
,
3475 ('%d%%' % confidence
).center(10, ' '))
3476 print ' %8s %70s' % (
3477 previous_data
['depot'], previous_link
)
3480 def _PrintStepTime(self
, revision_data_sorted
):
3481 step_perf_time_avg
= 0.0
3482 step_build_time_avg
= 0.0
3484 for _
, current_data
in revision_data_sorted
:
3485 if current_data
['value']:
3486 step_perf_time_avg
+= current_data
['perf_time']
3487 step_build_time_avg
+= current_data
['build_time']
3490 step_perf_time_avg
= step_perf_time_avg
/ step_count
3491 step_build_time_avg
= step_build_time_avg
/ step_count
3493 print 'Average build time : %s' % datetime
.timedelta(
3494 seconds
=int(step_build_time_avg
))
3495 print 'Average test time : %s' % datetime
.timedelta(
3496 seconds
=int(step_perf_time_avg
))
3498 def _PrintWarnings(self
):
3499 if not self
.warnings
:
3503 for w
in set(self
.warnings
):
3506 def _FindOtherRegressions(self
, revision_data_sorted
, bad_greater_than_good
):
3507 other_regressions
= []
3508 previous_values
= []
3510 for current_id
, current_data
in revision_data_sorted
:
3511 current_values
= current_data
['value']
3513 current_values
= current_values
['values']
3515 confidence
= CalculateConfidence(previous_values
, [current_values
])
3516 mean_of_prev_runs
= CalculateMean(sum(previous_values
, []))
3517 mean_of_current_runs
= CalculateMean(current_values
)
3519 # Check that the potential regression is in the same direction as
3520 # the overall regression. If the mean of the previous runs < the
3521 # mean of the current runs, this local regression is in same
3523 prev_less_than_current
= mean_of_prev_runs
< mean_of_current_runs
3524 is_same_direction
= (prev_less_than_current
if
3525 bad_greater_than_good
else not prev_less_than_current
)
3527 # Only report potential regressions with high confidence.
3528 if is_same_direction
and confidence
> 50:
3529 other_regressions
.append([current_id
, previous_id
, confidence
])
3530 previous_values
.append(current_values
)
3531 previous_id
= current_id
3532 return other_regressions
3535 def _GetResultsDict(self
, revision_data
, revision_data_sorted
):
3536 # Find range where it possibly broke.
3537 first_working_revision
= None
3538 first_working_revision_index
= -1
3539 last_broken_revision
= None
3540 last_broken_revision_index
= -1
3542 for i
in xrange(len(revision_data_sorted
)):
3543 k
, v
= revision_data_sorted
[i
]
3544 if v
['passed'] == 1:
3545 if not first_working_revision
:
3546 first_working_revision
= k
3547 first_working_revision_index
= i
3550 last_broken_revision
= k
3551 last_broken_revision_index
= i
3553 if last_broken_revision
!= None and first_working_revision
!= None:
3555 for i
in xrange(0, last_broken_revision_index
+ 1):
3556 if revision_data_sorted
[i
][1]['value']:
3557 broken_means
.append(revision_data_sorted
[i
][1]['value']['values'])
3560 for i
in xrange(first_working_revision_index
, len(revision_data_sorted
)):
3561 if revision_data_sorted
[i
][1]['value']:
3562 working_means
.append(revision_data_sorted
[i
][1]['value']['values'])
3564 # Flatten the lists to calculate mean of all values.
3565 working_mean
= sum(working_means
, [])
3566 broken_mean
= sum(broken_means
, [])
3568 # Calculate the approximate size of the regression
3569 mean_of_bad_runs
= CalculateMean(broken_mean
)
3570 mean_of_good_runs
= CalculateMean(working_mean
)
3572 regression_size
= 100 * CalculateRelativeChange(mean_of_good_runs
,
3574 if math
.isnan(regression_size
):
3575 regression_size
= 'zero-to-nonzero'
3577 regression_std_err
= math
.fabs(CalculatePooledStandardError(
3578 [working_mean
, broken_mean
]) /
3579 max(0.0001, min(mean_of_good_runs
, mean_of_bad_runs
))) * 100.0
3581 # Give a "confidence" in the bisect. At the moment we use how distinct the
3582 # values are before and after the last broken revision, and how noisy the
3584 confidence
= CalculateConfidence(working_means
, broken_means
)
3586 culprit_revisions
= []
3589 self
.ChangeToDepotWorkingDirectory(
3590 revision_data
[last_broken_revision
]['depot'])
3592 if revision_data
[last_broken_revision
]['depot'] == 'cros':
3593 # Want to get a list of all the commits and what depots they belong
3594 # to so that we can grab info about each.
3595 cmd
= ['repo', 'forall', '-c',
3596 'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
3597 last_broken_revision
, first_working_revision
+ 1)]
3598 (output
, return_code
) = RunProcessAndRetrieveOutput(cmd
)
3601 assert not return_code
, 'An error occurred while running'\
3602 ' "%s"' % ' '.join(cmd
)
3605 for l
in output
.split('\n'):
3607 # Output will be in form:
3609 # /path_to_other_depot
3617 contents
= l
.split(' ')
3618 if len(contents
) > 1:
3619 changes
.append([last_depot
, contents
[0]])
3622 info
= self
.source_control
.QueryRevisionInfo(c
[1])
3623 culprit_revisions
.append((c
[1], info
, None))
3625 for i
in xrange(last_broken_revision_index
, len(revision_data_sorted
)):
3626 k
, v
= revision_data_sorted
[i
]
3627 if k
== first_working_revision
:
3629 self
.ChangeToDepotWorkingDirectory(v
['depot'])
3630 info
= self
.source_control
.QueryRevisionInfo(k
)
3631 culprit_revisions
.append((k
, info
, v
['depot']))
3634 # Check for any other possible regression ranges
3635 other_regressions
= self
._FindOtherRegressions
(revision_data_sorted
,
3636 mean_of_bad_runs
> mean_of_good_runs
)
3639 'first_working_revision': first_working_revision
,
3640 'last_broken_revision': last_broken_revision
,
3641 'culprit_revisions': culprit_revisions
,
3642 'other_regressions': other_regressions
,
3643 'regression_size': regression_size
,
3644 'regression_std_err': regression_std_err
,
3645 'confidence': confidence
,
3648 def _CheckForWarnings(self
, results_dict
):
3649 if len(results_dict
['culprit_revisions']) > 1:
3650 self
.warnings
.append('Due to build errors, regression range could '
3651 'not be narrowed down to a single commit.')
3652 if self
.opts
.repeat_test_count
== 1:
3653 self
.warnings
.append('Tests were only set to run once. This may '
3654 'be insufficient to get meaningful results.')
3655 if results_dict
['confidence'] < 100:
3656 if results_dict
['confidence']:
3657 self
.warnings
.append(
3658 'Confidence is less than 100%. There could be other candidates '
3659 'for this regression. Try bisecting again with increased '
3660 'repeat_count or on a sub-metric that shows the regression more '
3663 self
.warnings
.append(
3664 'Confidence is 0%. Try bisecting again on another platform, with '
3665 'increased repeat_count or on a sub-metric that shows the '
3666 'regression more clearly.')
3668 def FormatAndPrintResults(self
, bisect_results
):
3669 """Prints the results from a bisection run in a readable format.
3672 bisect_results: The results from a bisection test run.
3674 revision_data
= bisect_results
['revision_data']
3675 revision_data_sorted
= sorted(revision_data
.iteritems(),
3676 key
= lambda x
: x
[1]['sort'])
3677 results_dict
= self
._GetResultsDict
(revision_data
, revision_data_sorted
)
3679 self
._CheckForWarnings
(results_dict
)
3681 if self
.opts
.output_buildbot_annotations
:
3682 bisect_utils
.OutputAnnotationStepStart('Build Status Per Revision')
3685 print 'Full results of bisection:'
3686 for current_id
, current_data
in revision_data_sorted
:
3687 build_status
= current_data
['passed']
3689 if type(build_status
) is bool:
3691 build_status
= 'Good'
3693 build_status
= 'Bad'
3695 print ' %20s %40s %s' % (current_data
['depot'],
3696 current_id
, build_status
)
3699 if self
.opts
.output_buildbot_annotations
:
3700 bisect_utils
.OutputAnnotationStepClosed()
3701 # The perf dashboard scrapes the "results" step in order to comment on
3702 # bugs. If you change this, please update the perf dashboard as well.
3703 bisect_utils
.OutputAnnotationStepStart('Results')
3705 self
._PrintBanner
(results_dict
)
3706 self
._PrintWarnings
()
3708 if results_dict
['culprit_revisions'] and results_dict
['confidence']:
3709 for culprit
in results_dict
['culprit_revisions']:
3710 cl
, info
, depot
= culprit
3711 self
._PrintRevisionInfo
(cl
, info
, depot
)
3712 if results_dict
['other_regressions']:
3713 self
._PrintOtherRegressions
(results_dict
['other_regressions'],
3715 self
._PrintTestedCommitsTable
(revision_data_sorted
,
3716 results_dict
['first_working_revision'],
3717 results_dict
['last_broken_revision'],
3718 results_dict
['confidence'])
3719 self
._PrintStepTime
(revision_data_sorted
)
3720 self
._PrintReproSteps
()
3721 self
._PrintThankYou
()
3722 if self
.opts
.output_buildbot_annotations
:
3723 bisect_utils
.OutputAnnotationStepClosed()
def DetermineAndCreateSourceControl(opts):
  """Attempts to determine the underlying source control workflow and returns
  a SourceControl object.

  Args:
    opts: The options parsed from the command line.

  Returns:
    An instance of a SourceControl object, or None if the current workflow
    is unsupported.
  """
  # A git checkout reports 'true' here; anything else means we cannot
  # bisect with the git workflow.
  output, _ = RunGit(['rev-parse', '--is-inside-work-tree'])
  if output.strip() != 'true':
    return None
  return GitSourceControl(opts)
def IsPlatformSupported(opts):
  """Checks that this platform and build system are supported.

  Args:
    opts: The options parsed from the command line (unused, kept for the
        caller's uniform signature).

  Returns:
    True if the platform and build system are supported.
  """
  # Haven't tested the script out on any other platforms yet.
  return os.name in ('posix', 'nt')
3757 def RmTreeAndMkDir(path_to_dir
, skip_makedir
=False):
3758 """Removes the directory tree specified, and then creates an empty
3759 directory in the same location (if not specified to skip).
3762 path_to_dir: Path to the directory tree.
3763 skip_makedir: Whether to skip creating empty directory, default is False.
3766 True if successful, False if an error occurred.
3769 if os
.path
.exists(path_to_dir
):
3770 shutil
.rmtree(path_to_dir
)
3772 if e
.errno
!= errno
.ENOENT
:
3775 if not skip_makedir
:
3776 return MaybeMakeDirectory(path_to_dir
)
3781 def RemoveBuildFiles(build_type
):
3782 """Removes build files from previous runs."""
3783 if RmTreeAndMkDir(os
.path
.join('out', build_type
)):
3784 if RmTreeAndMkDir(os
.path
.join('build', build_type
)):
3789 class BisectOptions(object):
3790 """Options to be used when running bisection."""
3792 super(BisectOptions
, self
).__init
__()
3794 self
.target_platform
= 'chromium'
3795 self
.build_preference
= None
3796 self
.good_revision
= None
3797 self
.bad_revision
= None
3798 self
.use_goma
= None
3799 self
.goma_dir
= None
3800 self
.cros_board
= None
3801 self
.cros_remote_ip
= None
3802 self
.repeat_test_count
= 20
3803 self
.truncate_percent
= 25
3804 self
.max_time_minutes
= 20
3807 self
.output_buildbot_annotations
= None
3808 self
.no_custom_deps
= False
3809 self
.working_directory
= None
3810 self
.extra_src
= None
3811 self
.debug_ignore_build
= None
3812 self
.debug_ignore_sync
= None
3813 self
.debug_ignore_perf_test
= None
3814 self
.gs_bucket
= None
3815 self
.target_arch
= 'ia32'
3816 self
.target_build_type
= 'Release'
3817 self
.builder_host
= None
3818 self
.builder_port
= None
3819 self
.bisect_mode
= BISECT_MODE_MEAN
3821 def _CreateCommandLineParser(self
):
3822 """Creates a parser with bisect options.
3825 An instance of optparse.OptionParser.
3827 usage
= ('%prog [options] [-- chromium-options]\n'
3828 'Perform binary search on revision history to find a minimal '
3829 'range of revisions where a peformance metric regressed.\n')
3831 parser
= optparse
.OptionParser(usage
=usage
)
3833 group
= optparse
.OptionGroup(parser
, 'Bisect options')
3834 group
.add_option('-c', '--command',
3836 help='A command to execute your performance test at' +
3837 ' each point in the bisection.')
3838 group
.add_option('-b', '--bad_revision',
3840 help='A bad revision to start bisection. ' +
3841 'Must be later than good revision. May be either a git' +
3842 ' or svn revision.')
3843 group
.add_option('-g', '--good_revision',
3845 help='A revision to start bisection where performance' +
3846 ' test is known to pass. Must be earlier than the ' +
3847 'bad revision. May be either a git or svn revision.')
3848 group
.add_option('-m', '--metric',
3850 help='The desired metric to bisect on. For example ' +
3851 '"vm_rss_final_b/vm_rss_f_b"')
3852 group
.add_option('-r', '--repeat_test_count',
3855 help='The number of times to repeat the performance '
3856 'test. Values will be clamped to range [1, 100]. '
3857 'Default value is 20.')
3858 group
.add_option('--max_time_minutes',
3861 help='The maximum time (in minutes) to take running the '
3862 'performance tests. The script will run the performance '
3863 'tests according to --repeat_test_count, so long as it '
3864 'doesn\'t exceed --max_time_minutes. Values will be '
3865 'clamped to range [1, 60].'
3866 'Default value is 20.')
3867 group
.add_option('-t', '--truncate_percent',
3870 help='The highest/lowest % are discarded to form a '
3871 'truncated mean. Values will be clamped to range [0, '
3872 '25]. Default value is 25 (highest/lowest 25% will be '
3874 group
.add_option('--bisect_mode',
3876 choices
=[BISECT_MODE_MEAN
, BISECT_MODE_STD_DEV
,
3877 BISECT_MODE_RETURN_CODE
],
3878 default
=BISECT_MODE_MEAN
,
3879 help='The bisect mode. Choices are to bisect on the '
3880 'difference in mean, std_dev, or return_code.')
3881 parser
.add_option_group(group
)
3883 group
= optparse
.OptionGroup(parser
, 'Build options')
3884 group
.add_option('-w', '--working_directory',
3886 help='Path to the working directory where the script '
3887 'will do an initial checkout of the chromium depot. The '
3888 'files will be placed in a subdirectory "bisect" under '
3889 'working_directory and that will be used to perform the '
3890 'bisection. This parameter is optional, if it is not '
3891 'supplied, the script will work from the current depot.')
3892 group
.add_option('--build_preference',
3894 choices
=['msvs', 'ninja', 'make'],
3895 help='The preferred build system to use. On linux/mac '
3896 'the options are make/ninja. On Windows, the options '
3898 group
.add_option('--target_platform',
3900 choices
=['chromium', 'cros', 'android', 'android-chrome'],
3902 help='The target platform. Choices are "chromium" '
3903 '(current platform), "cros", or "android". If you '
3904 'specify something other than "chromium", you must be '
3905 'properly set up to build that platform.')
3906 group
.add_option('--no_custom_deps',
3907 dest
='no_custom_deps',
3908 action
="store_true",
3910 help='Run the script with custom_deps or not.')
3911 group
.add_option('--extra_src',
3913 help='Path to a script which can be used to modify '
3914 'the bisect script\'s behavior.')
3915 group
.add_option('--cros_board',
3917 help='The cros board type to build.')
3918 group
.add_option('--cros_remote_ip',
3920 help='The remote machine to image to.')
3921 group
.add_option('--use_goma',
3922 action
="store_true",
3923 help='Add a bunch of extra threads for goma, and enable '
3925 group
.add_option('--goma_dir',
3926 help='Path to goma tools (or system default if not '
3928 group
.add_option('--output_buildbot_annotations',
3929 action
="store_true",
3930 help='Add extra annotation output for buildbot.')
3931 group
.add_option('--gs_bucket',
3935 help=('Name of Google Storage bucket to upload or '
3936 'download build. e.g., chrome-perf'))
3937 group
.add_option('--target_arch',
3939 choices
=['ia32', 'x64', 'arm'],
3942 help=('The target build architecture. Choices are "ia32" '
3943 '(default), "x64" or "arm".'))
3944 group
.add_option('--target_build_type',
3946 choices
=['Release', 'Debug'],
3948 help='The target build type. Choices are "Release" '
3949 '(default), or "Debug".')
3950 group
.add_option('--builder_host',
3951 dest
='builder_host',
3953 help=('Host address of server to produce build by posting'
3954 ' try job request.'))
3955 group
.add_option('--builder_port',
3956 dest
='builder_port',
3958 help=('HTTP port of the server to produce build by posting'
3959 ' try job request.'))
3960 parser
.add_option_group(group
)
3962 group
= optparse
.OptionGroup(parser
, 'Debug options')
3963 group
.add_option('--debug_ignore_build',
3964 action
="store_true",
3965 help='DEBUG: Don\'t perform builds.')
3966 group
.add_option('--debug_ignore_sync',
3967 action
="store_true",
3968 help='DEBUG: Don\'t perform syncs.')
3969 group
.add_option('--debug_ignore_perf_test',
3970 action
="store_true",
3971 help='DEBUG: Don\'t perform performance tests.')
3972 parser
.add_option_group(group
)
3975 def ParseCommandLine(self
):
3976 """Parses the command line for bisect options."""
3977 parser
= self
._CreateCommandLineParser
()
3978 (opts
, _
) = parser
.parse_args()
3981 if not opts
.command
:
3982 raise RuntimeError('missing required parameter: --command')
3984 if not opts
.good_revision
:
3985 raise RuntimeError('missing required parameter: --good_revision')
3987 if not opts
.bad_revision
:
3988 raise RuntimeError('missing required parameter: --bad_revision')
3990 if not opts
.metric
and opts
.bisect_mode
!= BISECT_MODE_RETURN_CODE
:
3991 raise RuntimeError('missing required parameter: --metric')
3994 if not cloud_storage
.List(opts
.gs_bucket
):
3995 raise RuntimeError('Invalid Google Storage: gs://%s' % opts
.gs_bucket
)
3996 if not opts
.builder_host
:
3997 raise RuntimeError('Must specify try server hostname, when '
3998 'gs_bucket is used: --builder_host')
3999 if not opts
.builder_port
:
4000 raise RuntimeError('Must specify try server port number, when '
4001 'gs_bucket is used: --builder_port')
4002 if opts
.target_platform
== 'cros':
4003 # Run sudo up front to make sure credentials are cached for later.
4004 print 'Sudo is required to build cros:'
4006 RunProcess(['sudo', 'true'])
4008 if not opts
.cros_board
:
4009 raise RuntimeError('missing required parameter: --cros_board')
4011 if not opts
.cros_remote_ip
:
4012 raise RuntimeError('missing required parameter: --cros_remote_ip')
4014 if not opts
.working_directory
:
4015 raise RuntimeError('missing required parameter: --working_directory')
4017 metric_values
= opts
.metric
.split('/')
4018 if (len(metric_values
) != 2 and
4019 opts
.bisect_mode
!= BISECT_MODE_RETURN_CODE
):
4020 raise RuntimeError("Invalid metric specified: [%s]" % opts
.metric
)
4022 opts
.metric
= metric_values
4023 opts
.repeat_test_count
= min(max(opts
.repeat_test_count
, 1), 100)
4024 opts
.max_time_minutes
= min(max(opts
.max_time_minutes
, 1), 60)
4025 opts
.truncate_percent
= min(max(opts
.truncate_percent
, 0), 25)
4026 opts
.truncate_percent
= opts
.truncate_percent
/ 100.0
4028 for k
, v
in opts
.__dict
__.iteritems():
4029 assert hasattr(self
, k
), "Invalid %s attribute in BisectOptions." % k
4031 except RuntimeError, e
:
4032 output_string
= StringIO
.StringIO()
4033 parser
.print_help(file=output_string
)
4034 error_message
= '%s\n\n%s' % (e
.message
, output_string
.getvalue())
4035 output_string
.close()
4036 raise RuntimeError(error_message
)
4039 def FromDict(values
):
4040 """Creates an instance of BisectOptions with the values parsed from a
4044 values: a dict containing options to set.
4047 An instance of BisectOptions.
4049 opts
= BisectOptions()
4050 for k
, v
in values
.iteritems():
4051 assert hasattr(opts
, k
), 'Invalid %s attribute in '\
4052 'BisectOptions.' % k
4055 metric_values
= opts
.metric
.split('/')
4056 if len(metric_values
) != 2:
4057 raise RuntimeError("Invalid metric specified: [%s]" % opts
.metric
)
4059 opts
.metric
= metric_values
4060 opts
.repeat_test_count
= min(max(opts
.repeat_test_count
, 1), 100)
4061 opts
.max_time_minutes
= min(max(opts
.max_time_minutes
, 1), 60)
4062 opts
.truncate_percent
= min(max(opts
.truncate_percent
, 0), 25)
4063 opts
.truncate_percent
= opts
.truncate_percent
/ 100.0
4071 opts
= BisectOptions()
4072 opts
.ParseCommandLine()
4075 extra_src
= bisect_utils
.LoadExtraSrc(opts
.extra_src
)
4077 raise RuntimeError("Invalid or missing --extra_src.")
4078 _AddAdditionalDepotInfo(extra_src
.GetAdditionalDepotInfo())
4080 if opts
.working_directory
:
4081 custom_deps
= bisect_utils
.DEFAULT_GCLIENT_CUSTOM_DEPS
4082 if opts
.no_custom_deps
:
4084 bisect_utils
.CreateBisectDirectoryAndSetupDepot(opts
, custom_deps
)
4086 os
.chdir(os
.path
.join(os
.getcwd(), 'src'))
4088 if not RemoveBuildFiles(opts
.target_build_type
):
4089 raise RuntimeError('Something went wrong removing the build files.')
4091 if not IsPlatformSupported(opts
):
4092 raise RuntimeError("Sorry, this platform isn't supported yet.")
4094 # Check what source control method they're using. Only support git workflow
4096 source_control
= DetermineAndCreateSourceControl(opts
)
4098 if not source_control
:
4099 raise RuntimeError("Sorry, only the git workflow is supported at the "
4102 # gClient sync seems to fail if you're not in master branch.
4103 if (not source_control
.IsInProperBranch() and
4104 not opts
.debug_ignore_sync
and
4105 not opts
.working_directory
):
4106 raise RuntimeError("You must switch to master branch to run bisection.")
4107 bisect_test
= BisectPerformanceMetrics(source_control
, opts
)
4109 bisect_results
= bisect_test
.Run(opts
.command
,
4113 if bisect_results
['error']:
4114 raise RuntimeError(bisect_results
['error'])
4115 bisect_test
.FormatAndPrintResults(bisect_results
)
4118 bisect_test
.PerformCleanup()
4119 except RuntimeError, e
:
4120 if opts
.output_buildbot_annotations
:
4121 # The perf dashboard scrapes the "results" step in order to comment on
4122 # bugs. If you change this, please update the perf dashboard as well.
4123 bisect_utils
.OutputAnnotationStepStart('Results')
4124 print 'Error: %s' % e
.message
4125 if opts
.output_buildbot_annotations
:
4126 bisect_utils
.OutputAnnotationStepClosed()
4129 if __name__
== '__main__':