1 #!/usr/bin/env python
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file.
6 """Performance Test Bisect Tool
8 This script bisects a series of changelists using binary search. It starts at
9 a bad revision where a performance metric has regressed, and asks for a last
10 known-good revision. It will then binary search across this revision range by
11 syncing, building, and running a performance test. If the change is
12 suspected to occur as a result of WebKit/V8 changes, the script will
13 further bisect changes to those depots and attempt to narrow down the revision
14 range.
16 Example usage using SVN revisions:
18 ./tools/bisect-perf-regression.py -c\
19 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
20 -g 168222 -b 168232 -m shutdown/simple-user-quit
22 Be aware that if you're using the git workflow and specify an SVN revision,
23 the script will attempt to find the git SHA1 where SVN changes up to that
24 revision were merged in.
26 Example usage using git hashes:
28 ./tools/bisect-perf-regression.py -c\
29 "out/Release/performance_ui_tests --gtest_filter=ShutdownTest.SimpleUserQuit"\
30 -g 1f6e67861535121c5c819c16a666f2436c207e7b\
31 -b b732f23b4f81c382db0b23b9035f3dadc7d925bb\
32 -m shutdown/simple-user-quit
33 """
35 import copy
36 import datetime
37 import errno
38 import hashlib
39 import math
40 import optparse
41 import os
42 import re
43 import shlex
44 import shutil
45 import StringIO
46 import sys
47 import time
48 import zipfile
50 sys.path.append(os.path.join(os.path.dirname(__file__), 'telemetry'))
52 from auto_bisect import bisect_utils
53 from auto_bisect import builder
54 from auto_bisect import math_utils
55 from auto_bisect import post_perf_builder_job as bisect_builder
56 from auto_bisect import source_control as source_control_module
57 from auto_bisect import ttest
58 from telemetry.util import cloud_storage
60 # Below is the map of "depot" names to information about each depot. Each depot
61 # is a repository, and in the process of bisecting, revision ranges in these
62 # repositories may also be bisected.
64 # Each depot information dictionary may contain:
65 # src: Path to the working directory.
66 # recurse: True if this repository will get bisected.
67 # depends: A list of other repositories that are actually part of the same
68 # repository in svn. If the repository has any dependent repositories
69 # (e.g. skia/src needs skia/include and skia/gyp to be updated), then
70 # they are specified here.
71 # svn: URL of SVN repository. Needed for git workflow to resolve hashes to
72 # SVN revisions.
73 # from: Parent depot that must be bisected before this is bisected.
74 # deps_var: Key name in vars variable in DEPS file that has revision
75 # information.
76 DEPOT_DEPS_NAME = {
77 'chromium': {
78 'src': 'src',
79 'recurse': True,
80 'depends': None,
81 'from': ['cros', 'android-chrome'],
82 'viewvc':
83 'http://src.chromium.org/viewvc/chrome?view=revision&revision=',
84 'deps_var': 'chromium_rev'
85 },
86 'webkit': {
87 'src': 'src/third_party/WebKit',
88 'recurse': True,
89 'depends': None,
90 'from': ['chromium'],
91 'viewvc':
92 'http://src.chromium.org/viewvc/blink?view=revision&revision=',
93 'deps_var': 'webkit_revision'
94 },
95 'angle': {
96 'src': 'src/third_party/angle',
97 'src_old': 'src/third_party/angle_dx11',
98 'recurse': True,
99 'depends': None,
100 'from': ['chromium'],
101 'platform': 'nt',
102 'deps_var': 'angle_revision'
103 },
104 'v8': {
105 'src': 'src/v8',
106 'recurse': True,
107 'depends': None,
108 'from': ['chromium'],
109 'custom_deps': bisect_utils.GCLIENT_CUSTOM_DEPS_V8,
110 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
111 'deps_var': 'v8_revision'
112 },
113 'v8_bleeding_edge': {
114 'src': 'src/v8_bleeding_edge',
115 'recurse': True,
116 'depends': None,
117 'svn': 'https://v8.googlecode.com/svn/branches/bleeding_edge',
118 'from': ['v8'],
119 'viewvc': 'https://code.google.com/p/v8/source/detail?r=',
120 'deps_var': 'v8_revision'
121 },
122 'skia/src': {
123 'src': 'src/third_party/skia/src',
124 'recurse': True,
125 'svn': 'http://skia.googlecode.com/svn/trunk/src',
126 'depends': ['skia/include', 'skia/gyp'],
127 'from': ['chromium'],
128 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
129 'deps_var': 'skia_revision'
130 },
131 'skia/include': {
132 'src': 'src/third_party/skia/include',
133 'recurse': False,
134 'svn': 'http://skia.googlecode.com/svn/trunk/include',
135 'depends': None,
136 'from': ['chromium'],
137 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
138 'deps_var': 'None'
139 },
140 'skia/gyp': {
141 'src': 'src/third_party/skia/gyp',
142 'recurse': False,
143 'svn': 'http://skia.googlecode.com/svn/trunk/gyp',
144 'depends': None,
145 'from': ['chromium'],
146 'viewvc': 'https://code.google.com/p/skia/source/detail?r=',
147 'deps_var': 'None'
148 },
149 }
151 DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
153 CROS_CHROMEOS_PATTERN = 'chromeos-base/chromeos-chrome'
155 # Possible return values from BisectPerformanceMetrics.SyncBuildAndRunRevision.
156 BUILD_RESULT_SUCCEED = 0
157 BUILD_RESULT_FAIL = 1
158 BUILD_RESULT_SKIPPED = 2
160 # Maximum time in seconds to wait after posting build request to the try server.
161 # TODO: Change these values based on the actual time taken by buildbots on
162 # the try server.
163 MAX_MAC_BUILD_TIME = 14400
164 MAX_WIN_BUILD_TIME = 14400
165 MAX_LINUX_BUILD_TIME = 14400
167 # The confidence percentage at which confidence can be considered "high".
168 HIGH_CONFIDENCE = 95
170 # Patch template to add a new file, DEPS.sha, under the src folder.
171 # This file contains the SHA1 of the DEPS changes made while bisecting
172 # dependency repositories. This patch is sent along with the DEPS patch to the
173 # try server. When a build request is posted with a patch, the bisect builders
174 # on the try server read the SHA1 from this file once the build is produced
175 # and append it to the build archive filename.
176 DEPS_SHA_PATCH = """diff --git src/DEPS.sha src/DEPS.sha
177 new file mode 100644
178 --- /dev/null
179 +++ src/DEPS.sha
180 @@ -0,0 +1 @@
181 +%(deps_sha)s
182 """
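# A minimal usage sketch (mirroring DownloadCurrentBuild further below); the
# patch text and digest here are placeholders, not real values:
#
#   patch_sha = GetSHA1HexDigest(deps_patch_text)
#   patch = '%s\n%s' % (deps_patch_text, DEPS_SHA_PATCH % {'deps_sha': patch_sha})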
184 # The possible values of the --bisect_mode flag, which determines what to
185 # use when classifying a revision as "good" or "bad".
186 BISECT_MODE_MEAN = 'mean'
187 BISECT_MODE_STD_DEV = 'std_dev'
188 BISECT_MODE_RETURN_CODE = 'return_code'
190 # The perf dashboard looks for a string like "Estimated Confidence: 95%"
191 # to decide whether or not to cc the author(s). If you change this, please
192 # update the perf dashboard as well.
193 RESULTS_BANNER = """
194 ===== BISECT JOB RESULTS =====
195 Status: %(status)s
197 Test Command: %(command)s
198 Test Metric: %(metrics)s
199 Relative Change: %(change)s
200 Estimated Confidence: %(confidence).02f%%"""
202 # The perf dashboard specifically looks for the string
203 # "Author : " to parse out who to cc on a bug. If you change the
204 # formatting here, please update the perf dashboard as well.
205 RESULTS_REVISION_INFO = """
206 ===== SUSPECTED CL(s) =====
207 Subject : %(subject)s
208 Author : %(author)s%(email_info)s%(commit_info)s
209 Commit : %(cl)s
210 Date : %(cl_date)s"""
212 REPRO_STEPS_LOCAL = """
213 ==== INSTRUCTIONS TO REPRODUCE ====
214 To run locally:
215 $%(command)s"""
217 REPRO_STEPS_TRYJOB = """
218 To reproduce on a performance try bot:
219 1. Create new git branch or check out existing branch.
220 2. Edit tools/run-perf-test.cfg (instructions in file) or \
221 third_party/WebKit/Tools/run-perf-test.cfg.
222 a) Take care to strip any src/ directories from the head of \
223 relative path names.
224 b) On desktop, only --browser=release is supported; on Android, use \
225 --browser=android-chromium-testshell.
226 c) Test command to use: %(command)s
227 3. Upload your patch. --bypass-hooks is necessary to upload the changes you \
228 committed locally to run-perf-test.cfg.
229 Note: *DO NOT* commit run-perf-test.cfg changes to the project repository.
230 $ git cl upload --bypass-hooks
231 4. Send your try job to the try server. \
232 [Please make sure to use an appropriate bot to reproduce]
233 $ git cl try -m tryserver.chromium.perf -b <bot>
235 For more details please visit
236 https://sites.google.com/a/chromium.org/dev/developers/performance-try-bots"""
238 RESULTS_THANKYOU = """
239 ===== THANK YOU FOR CHOOSING BISECT AIRLINES =====
240 Visit http://www.chromium.org/developers/core-principles for Chrome's policy
241 on perf regressions.
242 Contact chrome-perf-dashboard-team with any questions or suggestions about
243 bisecting.
244 . .------.
245 . .---. \ \==)
246 . |PERF\ \ \\
247 . | ---------'-------'-----------.
248 . . 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 `-.
249 . \______________.-------._______________)
250 . / /
251 . / /
252 . / /==)
253 . ._______."""
256 def _AddAdditionalDepotInfo(depot_info):
257 """Adds additional depot info to the global depot variables."""
258 global DEPOT_DEPS_NAME
259 global DEPOT_NAMES
260 DEPOT_DEPS_NAME = dict(DEPOT_DEPS_NAME.items() + depot_info.items())
261 DEPOT_NAMES = DEPOT_DEPS_NAME.keys()
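# Illustrative call (the 'my_depot' entry is hypothetical, not part of this
# script); after the call the new depot appears in DEPOT_NAMES and can be
# bisected like the built-in ones:
#
#   _AddAdditionalDepotInfo({
#       'my_depot': {
#           'src': 'src/third_party/my_depot',
#           'recurse': True,
#           'depends': None,
#           'from': ['chromium'],
#           'deps_var': 'my_depot_revision',
#       },
#   })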
264 def ConfidenceScore(good_results_lists, bad_results_lists):
265 """Calculates a confidence score.
267 This score is a percentage which represents our degree of confidence in the
268 proposition that the good results and bad results are distinct groups, and
269 their differences aren't due to chance alone.
272 Args:
273 good_results_lists: A list of lists of "good" result numbers.
274 bad_results_lists: A list of lists of "bad" result numbers.
276 Returns:
277 A number in the range [0, 100].
278 """
279 # If there's only one item in either list, this means only one revision was
280 # classified good or bad; this isn't good enough evidence to make a decision.
281 # If an empty list was passed, that also implies zero confidence.
282 if len(good_results_lists) <= 1 or len(bad_results_lists) <= 1:
283 return 0.0
285 # Flatten the lists of results lists.
286 sample1 = sum(good_results_lists, [])
287 sample2 = sum(bad_results_lists, [])
289 # If there were only empty lists in either of the lists (this is unexpected
290 # and normally shouldn't happen), then we also want to return 0.
291 if not sample1 or not sample2:
292 return 0.0
294 # The p-value is approximately the probability of obtaining the given set
295 # of good and bad values just by chance.
296 _, _, p_value = ttest.WelchsTTest(sample1, sample2)
297 return 100.0 * (1.0 - p_value)
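# Illustrative example (made-up numbers): two lists of "good" runs clustered
# near 10 and two lists of "bad" runs clustered near 12 give a very small
# p-value from ttest.WelchsTTest, so the score should come out close to 100:
#
#   ConfidenceScore([[10.0, 10.1], [10.2, 10.0]], [[12.0, 12.1], [11.9, 12.2]])
#
# With only one classified revision on either side the function returns 0.0.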
300 def GetSHA1HexDigest(contents):
301 """Returns SHA1 hex digest of the given string."""
302 return hashlib.sha1(contents).hexdigest()
305 def GetZipFileName(build_revision=None, target_arch='ia32', patch_sha=None):
306 """Gets the archive file name for the given revision."""
307 def PlatformName():
308 """Return a string to be used in paths for the platform."""
309 if bisect_utils.IsWindowsHost():
310 # Build archive for x64 is still stored with the "win32" suffix.
311 # See chromium_utils.PlatformName().
312 if bisect_utils.Is64BitWindows() and target_arch == 'x64':
313 return 'win32'
314 return 'win32'
315 if bisect_utils.IsLinuxHost():
316 # Android builds are also archived with the "full-build-linux" prefix.
317 return 'linux'
318 if bisect_utils.IsMacHost():
319 return 'mac'
320 raise NotImplementedError('Unknown platform "%s".' % sys.platform)
322 base_name = 'full-build-%s' % PlatformName()
323 if not build_revision:
324 return base_name
325 if patch_sha:
326 build_revision = '%s_%s' % (build_revision, patch_sha)
327 return '%s_%s.zip' % (base_name, build_revision)
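# Illustrative examples (assuming a Linux host; the revision and SHA1 values
# are placeholders):
#
#   GetZipFileName()                                -> 'full-build-linux'
#   GetZipFileName('1f6e6786')                      -> 'full-build-linux_1f6e6786.zip'
#   GetZipFileName('1f6e6786', patch_sha='a94a8fe') -> 'full-build-linux_1f6e6786_a94a8fe.zip'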
330 def GetRemoteBuildPath(build_revision, target_platform='chromium',
331 target_arch='ia32', patch_sha=None):
332 """Returns the URL to download the build from."""
333 def GetGSRootFolderName(target_platform):
334 """Returns the Google Cloud Storage root folder name."""
335 if bisect_utils.IsWindowsHost():
336 if bisect_utils.Is64BitWindows() and target_arch == 'x64':
337 return 'Win x64 Builder'
338 return 'Win Builder'
339 if bisect_utils.IsLinuxHost():
340 if target_platform == 'android':
341 return 'android_perf_rel'
342 return 'Linux Builder'
343 if bisect_utils.IsMacHost():
344 return 'Mac Builder'
345 raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)
347 base_filename = GetZipFileName(
348 build_revision, target_arch, patch_sha)
349 builder_folder = GetGSRootFolderName(target_platform)
350 return '%s/%s' % (builder_folder, base_filename)
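# Illustrative example (Linux host, default 'chromium' platform, placeholder
# revision): GetRemoteBuildPath(268765) returns
# 'Linux Builder/full-build-linux_268765.zip', i.e. the Google Storage builder
# folder joined with the zip name from GetZipFileName above.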
353 def FetchFromCloudStorage(bucket_name, source_path, destination_path):
354 """Fetches file(s) from Google Cloud Storage.
356 Args:
357 bucket_name: Google Storage bucket name.
358 source_path: Source file path.
359 destination_path: Destination file path.
361 Returns:
362 Downloaded file path if the file exists, otherwise None.
363 """
364 target_file = os.path.join(destination_path, os.path.basename(source_path))
365 try:
366 if cloud_storage.Exists(bucket_name, source_path):
367 print 'Fetching file from gs://%s/%s ...' % (bucket_name, source_path)
368 cloud_storage.Get(bucket_name, source_path, destination_path)
369 if os.path.exists(target_file):
370 return target_file
371 else:
372 print ('File gs://%s/%s not found in cloud storage.' % (
373 bucket_name, source_path))
374 except Exception as e:
375 print 'Something went wrong while fetching file from cloud: %s' % e
376 if os.path.exists(target_file):
377 os.remove(target_file)
378 return None
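# Illustrative usage (the bucket name normally comes from the script's
# gs_bucket option; the paths here are placeholders):
#
#   archive = FetchFromCloudStorage('chrome-perf',
#                                   'Linux Builder/full-build-linux_268765.zip',
#                                   '/tmp/bisect-build')
#   # archive is the local file path on success, or None otherwise.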
381 # This is copied from build/scripts/common/chromium_utils.py.
382 def MaybeMakeDirectory(*path):
383 """Creates an entire path, if it doesn't already exist."""
384 file_path = os.path.join(*path)
385 try:
386 os.makedirs(file_path)
387 except OSError as e:
388 if e.errno != errno.EEXIST:
389 return False
390 return True
393 # This was copied from build/scripts/common/chromium_utils.py.
394 def ExtractZip(filename, output_dir, verbose=True):
395 """Extracts the zip archive into the output directory."""
396 MaybeMakeDirectory(output_dir)
398 # On Linux and Mac, we use the unzip command as it will
399 # handle links and file bits (executable), which is much
400 # easier than trying to do that with ZipInfo options.
402 # The Mac Version of unzip unfortunately does not support Zip64, whereas
403 # the python module does, so we have to fall back to the python zip module
404 # on Mac if the file size is greater than 4GB.
406 # On Windows, try to use 7z if it is installed, otherwise fall back to python
407 # zip module and pray we don't have files larger than 512MB to unzip.
408 unzip_cmd = None
409 if ((bisect_utils.IsMacHost()
410 and os.path.getsize(filename) < 4 * 1024 * 1024 * 1024)
411 or bisect_utils.IsLinuxHost()):
412 unzip_cmd = ['unzip', '-o']
413 elif (bisect_utils.IsWindowsHost()
414 and os.path.exists('C:\\Program Files\\7-Zip\\7z.exe')):
415 unzip_cmd = ['C:\\Program Files\\7-Zip\\7z.exe', 'x', '-y']
417 if unzip_cmd:
418 # Make sure path is absolute before changing directories.
419 filepath = os.path.abspath(filename)
420 saved_dir = os.getcwd()
421 os.chdir(output_dir)
422 command = unzip_cmd + [filepath]
423 result = bisect_utils.RunProcess(command)
424 os.chdir(saved_dir)
425 if result:
426 raise IOError('unzip failed: %s => %s' % (str(command), result))
427 else:
428 assert bisect_utils.IsWindowsHost() or bisect_utils.IsMacHost()
429 zf = zipfile.ZipFile(filename)
430 for name in zf.namelist():
431 if verbose:
432 print 'Extracting %s' % name
433 zf.extract(name, output_dir)
434 if bisect_utils.IsMacHost():
435 # Restore permission bits.
436 os.chmod(os.path.join(output_dir, name),
437 zf.getinfo(name).external_attr >> 16L)
440 def WriteStringToFile(text, file_name):
441 """Writes text to a file, raising a RuntimeError on failure."""
442 try:
443 with open(file_name, 'wb') as f:
444 f.write(text)
445 except IOError:
446 raise RuntimeError('Error writing to file [%s]' % file_name )
449 def ReadStringFromFile(file_name):
450 """Reads text from a file, raising a RuntimeError on failure."""
451 try:
452 with open(file_name) as f:
453 return f.read()
454 except IOError:
455 raise RuntimeError('Error reading file [%s]' % file_name )
458 def ChangeBackslashToSlashInPatch(diff_text):
459 """Formats file paths in the given patch text to Unix-style paths."""
460 if not diff_text:
461 return None
462 diff_lines = diff_text.split('\n')
463 for i in range(len(diff_lines)):
464 line = diff_lines[i]
465 if line.startswith('--- ') or line.startswith('+++ '):
466 diff_lines[i] = line.replace('\\', '/')
467 return '\n'.join(diff_lines)
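# Illustrative example (made-up diff header): a line such as
#   '--- a\\src\\DEPS'
# becomes
#   '--- a/src/DEPS'
# while backslashes on lines that are not '--- '/'+++ ' headers are left alone.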
470 def _ParseRevisionsFromDEPSFileManually(deps_file_contents):
471 """Parses the vars section of the DEPS file using regular expressions.
473 Args:
474 deps_file_contents: The DEPS file contents as a string.
476 Returns:
477 A dictionary in the format {depot: revision} if successful, otherwise None.
478 """
479 # We'll parse the "vars" section of the DEPS file.
480 rxp = re.compile('vars = {(?P<vars_body>[^}]+)', re.MULTILINE)
481 re_results = rxp.search(deps_file_contents)
483 if not re_results:
484 return None
486 # We should be left with a series of entries in the vars component of
487 # the DEPS file with the following format:
488 # 'depot_name': 'revision',
489 vars_body = re_results.group('vars_body')
490 rxp = re.compile("'(?P<depot_body>[\w_-]+)':[\s]+'(?P<rev_body>[\w@]+)'",
491 re.MULTILINE)
492 re_results = rxp.findall(vars_body)
494 return dict(re_results)
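# Illustrative example (made-up DEPS snippet): given file contents containing
#
#   vars = {
#     'webkit_revision': '12345',
#     'v8_revision': 'deadbeef',
#   }
#
# this returns {'webkit_revision': '12345', 'v8_revision': 'deadbeef'}.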
497 def _WaitUntilBuildIsReady(
498 fetch_build, bot_name, builder_host, builder_port, build_request_id,
499 max_timeout):
500 """Waits until build is produced by bisect builder on try server.
502 Args:
503 fetch_build: Function to check and download build from cloud storage.
504 bot_name: Builder bot name on try server.
505 builder_host: Try server host name.
506 builder_port: Try server port.
507 build_request_id: A unique ID of the build request posted to try server.
508 max_timeout: Maximum time to wait for the build.
510 Returns:
511 Downloaded archive file path if it exists, otherwise None.
512 """
513 # Build number on the try server.
514 build_num = None
515 # Interval to check build on cloud storage.
516 poll_interval = 60
517 # Interval to check build status on try server in seconds.
518 status_check_interval = 600
519 last_status_check = time.time()
520 start_time = time.time()
521 while True:
522 # Checks for build on gs://chrome-perf and download if exists.
523 res = fetch_build()
524 if res:
525 return (res, 'Build successfully found')
526 elapsed_status_check = time.time() - last_status_check
527 # To avoid overloading the try server with status check requests, we check
528 # the build status only every 10 minutes.
529 if elapsed_status_check > status_check_interval:
530 last_status_check = time.time()
531 if not build_num:
532 # Get the build number on try server for the current build.
533 build_num = bisect_builder.GetBuildNumFromBuilder(
534 build_request_id, bot_name, builder_host, builder_port)
535 # Check the status of build using the build number.
536 # Note: The build is treated as PENDING if the build number is not found
537 # on the try server.
538 build_status, status_link = bisect_builder.GetBuildStatus(
539 build_num, bot_name, builder_host, builder_port)
540 if build_status == bisect_builder.FAILED:
541 return (None, 'Failed to produce build, log: %s' % status_link)
542 elapsed_time = time.time() - start_time
543 if elapsed_time > max_timeout:
544 return (None, 'Timed out: %ss without build' % max_timeout)
546 print 'Time elapsed: %ss without build.' % elapsed_time
547 time.sleep(poll_interval)
548 # For some reason, mac bisect bots were not flushing stdout periodically.
549 # As a result, the buildbot command timed out. Flush stdout on all platforms
550 # while waiting for the build.
551 sys.stdout.flush()
554 def _UpdateV8Branch(deps_content):
555 """Updates V8 branch in DEPS file to process v8_bleeding_edge.
557 Checks for "v8_branch" in the DEPS file and, if present, updates its value
558 to the v8_bleeding_edge branch. Note: "v8_branch" was only added to the DEPS
559 vars as of DEPS revision 254916, so to support prior DEPS revisions we also
560 check for the "src/v8": <v8 source path> entry in DEPS and update it
561 directly.
563 Args:
564 deps_content: DEPS file contents to be modified.
566 Returns:
567 Modified DEPS file contents as a string.
568 """
569 new_branch = r'branches/bleeding_edge'
570 v8_branch_pattern = re.compile(r'(?<="v8_branch": ")(.*)(?=")')
571 if re.search(v8_branch_pattern, deps_content):
572 deps_content = re.sub(v8_branch_pattern, new_branch, deps_content)
573 else:
574 # Replaces the branch assigned to "src/v8" key in DEPS file.
575 # Format of "src/v8" in DEPS:
576 # "src/v8":
577 # (Var("googlecode_url") % "v8") + "/trunk@" + Var("v8_revision"),
578 # So, "/trunk@" is replaced with "/branches/bleeding_edge@".
579 v8_src_pattern = re.compile(
580 r'(?<="v8"\) \+ "/)(.*)(?=@" \+ Var\("v8_revision"\))', re.MULTILINE)
581 if re.search(v8_src_pattern, deps_content):
582 deps_content = re.sub(v8_src_pattern, new_branch, deps_content)
583 return deps_content
586 def _UpdateDEPSForAngle(revision, depot, deps_file):
587 """Updates the DEPS file with a new revision for the Angle repository.
589 This is a special case for the Angle depot: in the DEPS file, the "vars"
590 dictionary contains an "angle_revision" key that holds a git hash instead
591 of an SVN revision.
593 Sometimes the "angle_revision" key is not present in "vars" at all; in such
594 cases, the "deps" dictionary is checked for an entry matching
595 angle.git@[a-fA-F0-9]{40}$ and the git hash there is replaced.
596 """
597 deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
598 try:
599 deps_contents = ReadStringFromFile(deps_file)
600 # Check whether the revision pattern for this depot appears in the DEPS
601 # "vars" variable, e.g. "angle_revision": "fa63e947cb3eccf463648d21a05d5002c9b8adfa".
602 angle_rev_pattern = re.compile(r'(?<="%s": ")([a-fA-F0-9]{40})(?=")' %
603 deps_var, re.MULTILINE)
604 match = re.search(angle_rev_pattern, deps_contents)
605 if match:
606 # Update the revision information for the given depot
607 new_data = re.sub(angle_rev_pattern, revision, deps_contents)
608 else:
609 # Check whether the depot and revision pattern is in the DEPS file "deps"
610 # variable, e.g.,
611 # "src/third_party/angle": Var("chromium_git") +
612 # "/angle/angle.git@fa63e947cb3eccf463648d21a05d5002c9b8adfa",.
613 angle_rev_pattern = re.compile(
614 r'(?<=angle\.git@)([a-fA-F0-9]{40})(?=")', re.MULTILINE)
615 match = re.search(angle_rev_pattern, deps_contents)
616 if not match:
617 print 'Could not find angle revision information in DEPS file.'
618 return False
619 new_data = re.sub(angle_rev_pattern, revision, deps_contents)
620 # Write changes to DEPS file
621 WriteStringToFile(new_data, deps_file)
622 return True
623 except IOError, e:
624 print 'Something went wrong while updating DEPS file, %s' % e
625 return False
628 def _TryParseHistogramValuesFromOutput(metric, text):
629 """Attempts to parse a metric in the format HISTOGRAM <graph>: <trace>.
631 Args:
632 metric: The metric as a list of [<graph>, <trace>] strings.
633 text: The text to parse the metric values from.
635 Returns:
636 A list of floating point numbers found, [] if none were found.
637 """
638 metric_formatted = 'HISTOGRAM %s: %s= ' % (metric[0], metric[1])
640 text_lines = text.split('\n')
641 values_list = []
643 for current_line in text_lines:
644 if metric_formatted in current_line:
645 current_line = current_line[len(metric_formatted):]
647 try:
648 histogram_values = eval(current_line)
650 for b in histogram_values['buckets']:
651 average_for_bucket = float(b['high'] + b['low']) * 0.5
652 # Extend the list with N elements, each set to the average for that bucket.
653 values_list.extend([average_for_bucket] * b['count'])
654 except Exception:
655 pass
657 return values_list
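# Illustrative example (made-up graph/trace names and data): for
# metric = ['blink_gc', 'sweep_time'], a log line such as
#   HISTOGRAM blink_gc: sweep_time= {"buckets": [{"low": 1, "high": 3, "count": 2}]}
# yields [2.0, 2.0], i.e. the bucket midpoint repeated "count" times.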
660 def _TryParseResultValuesFromOutput(metric, text):
661 """Attempts to parse a metric in the format RESULT <graph>: <trace>= ...
663 Args:
664 metric: The metric as a list of [<graph>, <trace>] string pairs.
665 text: The text to parse the metric values from.
667 Returns:
668 A list of floating point numbers found.
669 """
670 # Format is: RESULT <graph>: <trace>= <value> <units>
671 metric_re = re.escape('RESULT %s: %s=' % (metric[0], metric[1]))
673 # The log will be parsed looking for format:
674 # <*>RESULT <graph_name>: <trace_name>= <value>
675 single_result_re = re.compile(
676 metric_re + '\s*(?P<VALUE>[-]?\d*(\.\d*)?)')
678 # The log will be parsed looking for format:
679 # <*>RESULT <graph_name>: <trace_name>= [<value>,value,value,...]
680 multi_results_re = re.compile(
681 metric_re + '\s*\[\s*(?P<VALUES>[-]?[\d\., ]+)\s*\]')
683 # The log will be parsed looking for format:
684 # <*>RESULT <graph_name>: <trace_name>= {<mean>, <std deviation>}
685 mean_stddev_re = re.compile(
686 metric_re +
687 '\s*\{\s*(?P<MEAN>[-]?\d*(\.\d*)?),\s*(?P<STDDEV>\d+(\.\d*)?)\s*\}')
689 text_lines = text.split('\n')
690 values_list = []
691 for current_line in text_lines:
692 # Parse the output from the performance test for the metric we're
693 # interested in.
694 single_result_match = single_result_re.search(current_line)
695 multi_results_match = multi_results_re.search(current_line)
696 mean_stddev_match = mean_stddev_re.search(current_line)
697 if (not single_result_match is None and
698 single_result_match.group('VALUE')):
699 values_list += [single_result_match.group('VALUE')]
700 elif (not multi_results_match is None and
701 multi_results_match.group('VALUES')):
702 metric_values = multi_results_match.group('VALUES')
703 values_list += metric_values.split(',')
704 elif (not mean_stddev_match is None and
705 mean_stddev_match.group('MEAN')):
706 values_list += [mean_stddev_match.group('MEAN')]
708 values_list = [float(v) for v in values_list
709 if bisect_utils.IsStringFloat(v)]
711 # If the metric is times/t, we need to sum the timings in order to get
712 # regression results similar to those from the try bots.
713 metrics_to_sum = [
714 ['times', 't'],
715 ['times', 'page_load_time'],
716 ['cold_times', 'page_load_time'],
717 ['warm_times', 'page_load_time'],
718 ]
720 if metric in metrics_to_sum:
721 if values_list:
722 values_list = [reduce(lambda x, y: float(x) + float(y), values_list)]
724 return values_list
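# Illustrative examples for metric = ['shutdown', 'simple-user-quit'] (the
# values are made up; units in the log line are ignored):
#
#   RESULT shutdown: simple-user-quit= 2.3 ms            -> [2.3]
#   RESULT shutdown: simple-user-quit= [2.3,2.5,2.4] ms  -> [2.3, 2.5, 2.4]
#   RESULT shutdown: simple-user-quit= {2.4, 0.1} ms     -> [2.4]  (mean only)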
727 def _ParseMetricValuesFromOutput(metric, text):
728 """Parses output from performance_ui_tests and retrieves the results for
729 a given metric.
731 Args:
732 metric: The metric as a list of [<graph>, <trace>] strings.
733 text: The text to parse the metric values from.
735 Returns:
736 A list of floating point numbers found.
737 """
738 metric_values = _TryParseResultValuesFromOutput(metric, text)
740 if not metric_values:
741 metric_values = _TryParseHistogramValuesFromOutput(metric, text)
743 return metric_values
746 def _GenerateProfileIfNecessary(command_args):
747 """Checks the command line of the performance test for dependencies on
748 profile generation, and runs tools/perf/generate_profile as necessary.
750 Args:
751 command_args: Command line being passed to performance test, as a list.
753 Returns:
754 False if profile generation was necessary and failed, otherwise True.
755 """
756 if '--profile-dir' in ' '.join(command_args):
757 # If we were using python 2.7+, we could just use the argparse
758 # module's parse_known_args to grab --profile-dir. Since some of the
759 # bots still run 2.6, we have to grab the arguments manually.
760 arg_dict = {}
761 args_to_parse = ['--profile-dir', '--browser']
763 for arg_to_parse in args_to_parse:
764 for i, current_arg in enumerate(command_args):
765 if arg_to_parse in current_arg:
766 current_arg_split = current_arg.split('=')
768 # Check 2 cases, --arg=<val> and --arg <val>
769 if len(current_arg_split) == 2:
770 arg_dict[arg_to_parse] = current_arg_split[1]
771 elif i + 1 < len(command_args):
772 arg_dict[arg_to_parse] = command_args[i+1]
774 path_to_generate = os.path.join('tools', 'perf', 'generate_profile')
776 if arg_dict.has_key('--profile-dir') and arg_dict.has_key('--browser'):
777 profile_path, profile_type = os.path.split(arg_dict['--profile-dir'])
778 return not bisect_utils.RunProcess(['python', path_to_generate,
779 '--profile-type-to-generate', profile_type,
780 '--browser', arg_dict['--browser'], '--output-dir', profile_path])
781 return False
782 return True
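# Illustrative example (placeholder arguments): for a command line containing
#   --profile-dir=profiles/small_profile --browser=release
# this runs, roughly,
#   python tools/perf/generate_profile --profile-type-to-generate small_profile
#       --browser release --output-dir profiles
# and returns True only if that process succeeds.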
785 def _AddRevisionsIntoRevisionData(revisions, depot, sort, revision_data):
786 """Adds new revisions to the revision_data dictionary and initializes them.
788 Args:
789 revisions: List of revisions to add.
790 depot: Depot that's currently in use (src, webkit, etc...)
791 sort: Sorting key for displaying revisions.
792 revision_data: A dictionary to add the new revisions into.
793 Existing revisions will have their sort keys adjusted.
794 """
795 num_depot_revisions = len(revisions)
797 for _, v in revision_data.iteritems():
798 if v['sort'] > sort:
799 v['sort'] += num_depot_revisions
801 for i in xrange(num_depot_revisions):
802 r = revisions[i]
803 revision_data[r] = {
804 'revision' : r,
805 'depot' : depot,
806 'value' : None,
807 'perf_time' : 0,
808 'build_time' : 0,
809 'passed' : '?',
810 'sort' : i + sort + 1,
811 }
814 def _PrintThankYou():
815 print RESULTS_THANKYOU
818 def _PrintTableRow(column_widths, row_data):
819 """Prints out a row in a formatted table that has columns aligned.
821 Args:
822 column_widths: A list of column width numbers.
823 row_data: A list of items for each column in this row.
824 """
825 assert len(column_widths) == len(row_data)
826 text = ''
827 for i in xrange(len(column_widths)):
828 current_row_data = row_data[i].center(column_widths[i], ' ')
829 text += ('%%%ds' % column_widths[i]) % current_row_data
830 print text
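# Illustrative example (made-up widths): _PrintTableRow([15, 10], ['Depot', 'Commit'])
# prints 'Depot' and 'Commit' centered within 15- and 10-character columns.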
833 def _PrintStepTime(revision_data_sorted):
834 """Prints information about how long various steps took.
836 Args:
837 revision_data_sorted: The sorted list of revision data dictionaries."""
838 step_perf_time_avg = 0.0
839 step_build_time_avg = 0.0
840 step_count = 0.0
841 for _, current_data in revision_data_sorted:
842 if current_data['value']:
843 step_perf_time_avg += current_data['perf_time']
844 step_build_time_avg += current_data['build_time']
845 step_count += 1
846 if step_count:
847 step_perf_time_avg = step_perf_time_avg / step_count
848 step_build_time_avg = step_build_time_avg / step_count
849 print
850 print 'Average build time : %s' % datetime.timedelta(
851 seconds=int(step_build_time_avg))
852 print 'Average test time : %s' % datetime.timedelta(
853 seconds=int(step_perf_time_avg))
856 def _FindOtherRegressions(revision_data_sorted, bad_greater_than_good):
857 """Compiles a list of other possible regressions from the revision data.
859 Args:
860 revision_data_sorted: Sorted list of (revision, revision data) pairs.
861 bad_greater_than_good: Whether the result value at the "bad" revision is
862 numerically greater than the result value at the "good" revision.
864 Returns:
865 A list of [current_rev, previous_rev, confidence] for other places where
866 there may have been a regression.
867 """
868 other_regressions = []
869 previous_values = []
870 previous_id = None
871 for current_id, current_data in revision_data_sorted:
872 current_values = current_data['value']
873 if current_values:
874 current_values = current_values['values']
875 if previous_values:
876 confidence = ConfidenceScore(previous_values, [current_values])
877 mean_of_prev_runs = math_utils.Mean(sum(previous_values, []))
878 mean_of_current_runs = math_utils.Mean(current_values)
880 # Check that the potential regression is in the same direction as
881 # the overall regression. If the mean of the previous runs < the
882 # mean of the current runs, this local regression is in the same
883 # direction.
884 prev_less_than_current = mean_of_prev_runs < mean_of_current_runs
885 is_same_direction = (prev_less_than_current if
886 bad_greater_than_good else not prev_less_than_current)
888 # Only report potential regressions with high confidence.
889 if is_same_direction and confidence > 50:
890 other_regressions.append([current_id, previous_id, confidence])
891 previous_values.append(current_values)
892 previous_id = current_id
893 return other_regressions
896 class BisectPerformanceMetrics(object):
897 """This class contains functionality to perform a bisection of a range of
898 revisions to narrow down where performance regressions may have occurred.
900 The main entry point is the Run method.
901 """
903 def __init__(self, source_control, opts):
904 super(BisectPerformanceMetrics, self).__init__()
906 self.opts = opts
907 self.source_control = source_control
908 self.src_cwd = os.getcwd()
909 self.cros_cwd = os.path.join(os.getcwd(), '..', 'cros')
910 self.depot_cwd = {}
911 self.cleanup_commands = []
912 self.warnings = []
913 self.builder = builder.Builder.FromOpts(opts)
915 # This always starts true since the script grabs latest first.
916 self.was_blink = True
918 for d in DEPOT_NAMES:
919 # The working directory of each depot is just the path to the depot, but
920 # since we're already in 'src', we can skip that part.
922 self.depot_cwd[d] = os.path.join(
923 self.src_cwd, DEPOT_DEPS_NAME[d]['src'][4:])
925 def PerformCleanup(self):
926 """Performs cleanup when script is finished."""
927 os.chdir(self.src_cwd)
928 for c in self.cleanup_commands:
929 if c[0] == 'mv':
930 shutil.move(c[1], c[2])
931 else:
932 assert False, 'Invalid cleanup command.'
934 def GetRevisionList(self, depot, bad_revision, good_revision):
935 """Retrieves a list of all the commits between the bad revision and
936 last known good revision."""
938 revision_work_list = []
940 if depot == 'cros':
941 revision_range_start = good_revision
942 revision_range_end = bad_revision
944 cwd = os.getcwd()
945 self.ChangeToDepotWorkingDirectory('cros')
947 # Print the commit timestamps for every commit in the revision time
948 # range. We'll sort them and bisect by that. There is a remote chance that
949 # 2 (or more) commits will share the exact same timestamp, but it's
950 # probably safe to ignore that case.
951 cmd = ['repo', 'forall', '-c',
952 'git log --format=%%ct --before=%d --after=%d' % (
953 revision_range_end, revision_range_start)]
954 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd)
956 assert not return_code, ('An error occurred while running '
957 '"%s"' % ' '.join(cmd))
959 os.chdir(cwd)
961 revision_work_list = list(set(
962 [int(o) for o in output.split('\n') if bisect_utils.IsStringInt(o)]))
963 revision_work_list = sorted(revision_work_list, reverse=True)
964 else:
965 cwd = self._GetDepotDirectory(depot)
966 revision_work_list = self.source_control.GetRevisionList(bad_revision,
967 good_revision, cwd=cwd)
969 return revision_work_list
971 def _GetV8BleedingEdgeFromV8TrunkIfMappable(self, revision):
972 svn_revision = self.source_control.SVNFindRev(revision)
974 if bisect_utils.IsStringInt(svn_revision):
975 # V8 is tricky to bisect, in that there are only a few instances when
976 # we can dive into bleeding_edge and get back a meaningful result.
977 # Try to detect a V8 "business as usual" case, which is when:
978 # 1. trunk revision N has description "Version X.Y.Z"
979 # 2. bleeding_edge revision (N-1) has description "Prepare push to
980 # trunk. Now working on X.Y.(Z+1)."
982 # As of 01/24/2014, V8 trunk descriptions are formatted:
983 # "Version 3.X.Y (based on bleeding_edge revision rZ)"
984 # So we can just try parsing that out first and fall back to the old way.
985 v8_dir = self._GetDepotDirectory('v8')
986 v8_bleeding_edge_dir = self._GetDepotDirectory('v8_bleeding_edge')
988 revision_info = self.source_control.QueryRevisionInfo(revision,
989 cwd=v8_dir)
991 version_re = re.compile("Version (?P<values>[0-9,.]+)")
993 regex_results = version_re.search(revision_info['subject'])
995 if regex_results:
996 git_revision = None
998 # Look for "based on bleeding_edge" and parse out revision
999 if 'based on bleeding_edge' in revision_info['subject']:
1000 try:
1001 bleeding_edge_revision = revision_info['subject'].split(
1002 'bleeding_edge revision r')[1]
1003 bleeding_edge_revision = int(bleeding_edge_revision.split(')')[0])
1004 git_revision = self.source_control.ResolveToRevision(
1005 bleeding_edge_revision, 'v8_bleeding_edge', DEPOT_DEPS_NAME, 1,
1006 cwd=v8_bleeding_edge_dir)
1007 return git_revision
1008 except (IndexError, ValueError):
1009 pass
1011 if not git_revision:
1012 # Wasn't successful, try the old way of looking for "Prepare push to"
1013 git_revision = self.source_control.ResolveToRevision(
1014 int(svn_revision) - 1, 'v8_bleeding_edge', DEPOT_DEPS_NAME, -1,
1015 cwd=v8_bleeding_edge_dir)
1017 if git_revision:
1018 revision_info = self.source_control.QueryRevisionInfo(git_revision,
1019 cwd=v8_bleeding_edge_dir)
1021 if 'Prepare push to trunk' in revision_info['subject']:
1022 return git_revision
1023 return None
1025 def _GetNearestV8BleedingEdgeFromTrunk(self, revision, search_forward=True):
1026 cwd = self._GetDepotDirectory('v8')
1027 cmd = ['log', '--format=%ct', '-1', revision]
1028 output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
1029 commit_time = int(output)
1030 commits = []
1032 if search_forward:
1033 cmd = ['log', '--format=%H', '-10', '--after=%d' % commit_time,
1034 'origin/master']
1035 output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
1036 output = output.split()
1037 commits = output
1038 commits = reversed(commits)
1039 else:
1040 cmd = ['log', '--format=%H', '-10', '--before=%d' % commit_time,
1041 'origin/master']
1042 output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
1043 output = output.split()
1044 commits = output
1046 bleeding_edge_revision = None
1048 for c in commits:
1049 bleeding_edge_revision = self._GetV8BleedingEdgeFromV8TrunkIfMappable(c)
1050 if bleeding_edge_revision:
1051 break
1053 return bleeding_edge_revision
1055 def _ParseRevisionsFromDEPSFile(self, depot):
1056 """Parses the local DEPS file to determine blink/skia/v8 revisions which may
1057 be needed if the bisect recurses into those depots later.
1059 Args:
1060 depot: Name of depot being bisected.
1062 Returns:
1063 A dict in the format {depot:revision} if successful, otherwise None.
1064 """
1065 try:
1066 deps_data = {
1067 'Var': lambda _: deps_data["vars"][_],
1068 'From': lambda *args: None,
1069 }
1070 execfile(bisect_utils.FILE_DEPS_GIT, {}, deps_data)
1071 deps_data = deps_data['deps']
1073 rxp = re.compile(".git@(?P<revision>[a-fA-F0-9]+)")
1074 results = {}
1075 for depot_name, depot_data in DEPOT_DEPS_NAME.iteritems():
1076 if (depot_data.get('platform') and
1077 depot_data.get('platform') != os.name):
1078 continue
1080 if (depot_data.get('recurse') and depot in depot_data.get('from')):
1081 depot_data_src = depot_data.get('src') or depot_data.get('src_old')
1082 src_dir = deps_data.get(depot_data_src)
1083 if src_dir:
1084 self.depot_cwd[depot_name] = os.path.join(self.src_cwd,
1085 depot_data_src[4:])
1086 re_results = rxp.search(src_dir)
1087 if re_results:
1088 results[depot_name] = re_results.group('revision')
1089 else:
1090 warning_text = ('Could not parse revision for %s while bisecting '
1091 '%s' % (depot_name, depot))
1092 if not warning_text in self.warnings:
1093 self.warnings.append(warning_text)
1094 else:
1095 results[depot_name] = None
1096 return results
1097 except ImportError:
1098 deps_file_contents = ReadStringFromFile(bisect_utils.FILE_DEPS_GIT)
1099 parse_results = _ParseRevisionsFromDEPSFileManually(deps_file_contents)
1100 results = {}
1101 for depot_name, depot_revision in parse_results.iteritems():
1102 depot_revision = depot_revision.strip('@')
1103 print depot_name, depot_revision
1104 for current_name, current_data in DEPOT_DEPS_NAME.iteritems():
1105 if (current_data.has_key('deps_var') and
1106 current_data['deps_var'] == depot_name):
1107 src_name = current_name
1108 results[src_name] = depot_revision
1109 break
1110 return results
1112 def _Get3rdPartyRevisions(self, depot):
1113 """Parses the DEPS file to determine WebKit/v8/etc... versions.
1115 Args:
1116 depot: A depot name. Should be in the DEPOT_NAMES list.
1118 Returns:
1119 A dict in the format {depot: revision} if successful, otherwise None.
1120 """
1121 cwd = os.getcwd()
1122 self.ChangeToDepotWorkingDirectory(depot)
1124 results = {}
1126 if depot == 'chromium' or depot == 'android-chrome':
1127 results = self._ParseRevisionsFromDEPSFile(depot)
1128 os.chdir(cwd)
1130 if depot == 'cros':
1131 cmd = [
1132 bisect_utils.CROS_SDK_PATH,
1133 '--',
1134 'portageq-%s' % self.opts.cros_board,
1135 'best_visible',
1136 '/build/%s' % self.opts.cros_board,
1137 'ebuild',
1138 CROS_CHROMEOS_PATTERN
1139 ]
1140 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd)
1142 assert not return_code, ('An error occurred while running '
1143 '"%s"' % ' '.join(cmd))
1145 if len(output) > CROS_CHROMEOS_PATTERN:
1146 output = output[len(CROS_CHROMEOS_PATTERN):]
1148 if len(output) > 1:
1149 output = output.split('_')[0]
1151 if len(output) > 3:
1152 contents = output.split('.')
1154 version = contents[2]
1156 if contents[3] != '0':
1157 warningText = ('Chrome version: %s.%s but using %s.0 to bisect.' %
1158 (version, contents[3], version))
1159 if not warningText in self.warnings:
1160 self.warnings.append(warningText)
1162 cwd = os.getcwd()
1163 self.ChangeToDepotWorkingDirectory('chromium')
1164 cmd = ['log', '-1', '--format=%H',
1165 '--author=chrome-release@google.com',
1166 '--grep=to %s' % version, 'origin/master']
1167 output = bisect_utils.CheckRunGit(cmd)
1168 os.chdir(cwd)
1170 results['chromium'] = output.strip()
1172 if depot == 'v8':
1173 # We can't try to map the trunk revision to bleeding edge yet, because
1174 # we don't know which direction to try to search in. Have to wait until
1175 # the bisect has narrowed the results down to 2 v8 rolls.
1176 results['v8_bleeding_edge'] = None
1178 return results
1180 def BackupOrRestoreOutputdirectory(self, restore=False, build_type='Release'):
1181 """Backs up or restores build output directory based on restore argument.
1183 Args:
1184 restore: Indicates whether to restore or back up. Default is False (backup).
1185 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1187 Returns:
1188 Path to the backup or restored location as a string, or None if it fails.
1189 """
1190 build_dir = os.path.abspath(
1191 builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1192 source_dir = os.path.join(build_dir, build_type)
1193 destination_dir = os.path.join(build_dir, '%s.bak' % build_type)
1194 if restore:
1195 source_dir, destination_dir = destination_dir, source_dir
1196 if os.path.exists(source_dir):
1197 RmTreeAndMkDir(destination_dir, skip_makedir=True)
1198 shutil.move(source_dir, destination_dir)
1199 return destination_dir
1200 return None
1202 def GetBuildArchiveForRevision(self, revision, gs_bucket, target_arch,
1203 patch_sha, out_dir):
1204 """Checks and downloads build archive for a given revision.
1206 Checks for a build archive under the Git hash or the SVN revision. If
1207 either file exists, then downloads the archive file.
1209 Args:
1210 revision: A Git hash revision.
1211 gs_bucket: Cloud storage bucket name
1212 target_arch: 32 or 64 bit build target
1213 patch_sha: SHA1 hex digest of the DEPS patch (used when bisecting 3rd-party repositories).
1214 out_dir: Build output directory where downloaded file is stored.
1216 Returns:
1217 Downloaded archive file path if it exists, otherwise None.
1218 """
1219 # Source archive file path on cloud storage using Git revision.
1220 source_file = GetRemoteBuildPath(
1221 revision, self.opts.target_platform, target_arch, patch_sha)
1222 downloaded_archive = FetchFromCloudStorage(gs_bucket, source_file, out_dir)
1223 if not downloaded_archive:
1224 # Get SVN revision for the given SHA.
1225 svn_revision = self.source_control.SVNFindRev(revision)
1226 if svn_revision:
1227 # Source archive file path on cloud storage using SVN revision.
1228 source_file = GetRemoteBuildPath(
1229 svn_revision, self.opts.target_platform, target_arch, patch_sha)
1230 return FetchFromCloudStorage(gs_bucket, source_file, out_dir)
1231 return downloaded_archive
1233 def DownloadCurrentBuild(self, revision, build_type='Release', patch=None):
1234 """Downloads the build archive for the given revision.
1236 Args:
1237 revision: The Git revision to download or build.
1238 build_type: Target build type ('Release', 'Debug', 'Release_x64' etc.)
1239 patch: A DEPS patch (used while bisecting 3rd party repositories).
1241 Returns:
1242 True if download succeeds, otherwise False.
1243 """
1244 patch_sha = None
1245 if patch:
1246 # Get the SHA of the DEPS changes patch.
1247 patch_sha = GetSHA1HexDigest(patch)
1249 # Append to the DEPS patch a patch that creates a new file named
1250 # 'DEPS.sha' containing the patch_sha computed above.
1251 patch = '%s\n%s' % (patch, DEPS_SHA_PATCH % {'deps_sha': patch_sha})
1253 # Get Build output directory
1254 abs_build_dir = os.path.abspath(
1255 builder.GetBuildOutputDirectory(self.opts, self.src_cwd))
1257 fetch_build_func = lambda: self.GetBuildArchiveForRevision(
1258 revision, self.opts.gs_bucket, self.opts.target_arch,
1259 patch_sha, abs_build_dir)
1261 # Downloaded archive file path, downloads build archive for given revision.
1262 downloaded_file = fetch_build_func()
1264 # When the build archive doesn't exist, post a build request to the try
1265 # server and wait for the build to be produced.
1266 if not downloaded_file:
1267 downloaded_file = self.PostBuildRequestAndWait(
1268 revision, fetch_build=fetch_build_func, patch=patch)
1269 if not downloaded_file:
1270 return False
1272 # Generic name for the archive, created when archive file is extracted.
1273 output_dir = os.path.join(
1274 abs_build_dir, GetZipFileName(target_arch=self.opts.target_arch))
1275 # Unzip build archive directory.
1276 try:
1277 RmTreeAndMkDir(output_dir, skip_makedir=True)
1278 self.BackupOrRestoreOutputdirectory(restore=False)
1279 # Build output directory based on target(e.g. out/Release, out/Debug).
1280 target_build_output_dir = os.path.join(abs_build_dir, build_type)
1281 ExtractZip(downloaded_file, abs_build_dir)
1282 if not os.path.exists(output_dir):
1283 # Due to recipe changes, the extracted build folder contains
1284 # out/Release instead of full-build-<platform>/Release.
1285 if os.path.exists(os.path.join(abs_build_dir, 'out', build_type)):
1286 output_dir = os.path.join(abs_build_dir, 'out', build_type)
1287 else:
1288 raise IOError('Missing extracted folder %s ' % output_dir)
1290 print 'Moving build from %s to %s' % (
1291 output_dir, target_build_output_dir)
1292 shutil.move(output_dir, target_build_output_dir)
1293 return True
1294 except Exception as e:
1295 print 'Something went wrong while extracting archive file: %s' % e
1296 self.BackupOrRestoreOutputdirectory(restore=True)
1297 # Cleanup any leftovers from unzipping.
1298 if os.path.exists(output_dir):
1299 RmTreeAndMkDir(output_dir, skip_makedir=True)
1300 finally:
1301 # Delete downloaded archive
1302 if os.path.exists(downloaded_file):
1303 os.remove(downloaded_file)
1304 return False
1306 def PostBuildRequestAndWait(self, revision, fetch_build, patch=None):
1307 """POSTs the build request job to the try server instance.
1309 A try job build request is posted to the tryserver.chromium.perf master,
1310 and this method waits for the binaries to be produced and archived in cloud
1311 storage. Once the build is ready and stored in the cloud, the build archive
1312 is downloaded into the output folder.
1314 Args:
1315 revision: A Git hash revision.
1316 fetch_build: Function to check and download build from cloud storage.
1317 patch: A DEPS patch (used while bisecting 3rd party repositories).
1319 Returns:
1320 Downloaded archive file path when requested build exists and download is
1321 successful, otherwise None.
1322 """
1323 # Get SVN revision for the given SHA.
1324 svn_revision = self.source_control.SVNFindRev(revision)
1325 if not svn_revision:
1326 raise RuntimeError(
1327 'Failed to determine SVN revision for %s' % revision)
1329 def GetBuilderNameAndBuildTime(target_platform, target_arch='ia32'):
1330 """Gets builder bot name and build time in seconds based on platform."""
1331 # Bot names should match the one listed in tryserver.chromium's
1332 # master.cfg which produces builds for bisect.
1333 if bisect_utils.IsWindowsHost():
1334 if bisect_utils.Is64BitWindows() and target_arch == 'x64':
1335 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
1336 return ('win_perf_bisect_builder', MAX_WIN_BUILD_TIME)
1337 if bisect_utils.IsLinuxHost():
1338 if target_platform == 'android':
1339 return ('android_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
1340 return ('linux_perf_bisect_builder', MAX_LINUX_BUILD_TIME)
1341 if bisect_utils.IsMacHost():
1342 return ('mac_perf_bisect_builder', MAX_MAC_BUILD_TIME)
1343 raise NotImplementedError('Unsupported Platform "%s".' % sys.platform)
1344 if not fetch_build:
1345 return False
1347 bot_name, build_timeout = GetBuilderNameAndBuildTime(
1348 self.opts.target_platform, self.opts.target_arch)
1349 builder_host = self.opts.builder_host
1350 builder_port = self.opts.builder_port
1351 # Create a unique ID for each build request posted to try server builders.
1352 # This ID is added to "Reason" property of the build.
1353 build_request_id = GetSHA1HexDigest(
1354 '%s-%s-%s' % (svn_revision, patch, time.time()))
1356 # Creates a try job description.
1357 job_args = {
1358 'revision': 'src@%s' % svn_revision,
1359 'bot': bot_name,
1360 'name': build_request_id,
1361 }
1362 # Update patch information if supplied.
1363 if patch:
1364 job_args['patch'] = patch
1365 # Posts job to build the revision on the server.
1366 if bisect_builder.PostTryJob(builder_host, builder_port, job_args):
1367 target_file, error_msg = _WaitUntilBuildIsReady(
1368 fetch_build, bot_name, builder_host, builder_port, build_request_id,
1369 build_timeout)
1370 if not target_file:
1371 print '%s [revision: %s]' % (error_msg, svn_revision)
1372 return None
1373 return target_file
1374 print 'Failed to post build request for revision: [%s]' % svn_revision
1375 return None
1377 def IsDownloadable(self, depot):
1378 """Checks if build can be downloaded based on target platform and depot."""
1379 if (self.opts.target_platform in ['chromium', 'android'] and
1380 self.opts.gs_bucket):
1381 return (depot == 'chromium' or
1382 'chromium' in DEPOT_DEPS_NAME[depot]['from'] or
1383 'v8' in DEPOT_DEPS_NAME[depot]['from'])
1384 return False
1386 def UpdateDeps(self, revision, depot, deps_file):
1387 """Updates DEPS file with new revision of dependency repository.
1389 This method searches DEPS for the pattern in which the depot revision is
1390 specified (e.g. "webkit_revision": "123456"). If a match is found, it
1391 resolves the given git hash to an SVN revision and replaces it in the DEPS file.
1393 Args:
1394 revision: A git hash revision of the dependency repository.
1395 depot: Current depot being bisected.
1396 deps_file: Path to DEPS file.
1398 Returns:
1399 True if the DEPS file is modified successfully, otherwise False.
1400 """
1401 if not os.path.exists(deps_file):
1402 return False
1404 deps_var = DEPOT_DEPS_NAME[depot]['deps_var']
1405 # Don't update DEPS file if deps_var is not set in DEPOT_DEPS_NAME.
1406 if not deps_var:
1407 print 'DEPS update not supported for Depot: %s' % depot
1408 return False
1410 # Hack for Angle repository. In the DEPS file, "vars" dictionary variable
1411 # contains "angle_revision" key that holds git hash instead of SVN revision.
1412 # Sometimes the "angle_revision" key is not specified in the "vars" variable.
1413 # In such cases, check the "deps" dictionary variable for an entry matching
1414 # angle.git@[a-fA-F0-9]{40}$ and replace the git hash.
1415 if depot == 'angle':
1416 return _UpdateDEPSForAngle(revision, depot, deps_file)
1418 try:
1419 deps_contents = ReadStringFromFile(deps_file)
1420 # Check whether the depot and revision pattern is in the DEPS file vars,
1421 # e.g. for webkit the format is "webkit_revision": "12345".
1422 deps_revision = re.compile(r'(?<="%s": ")([0-9]+)(?=")' % deps_var,
1423 re.MULTILINE)
1424 match = re.search(deps_revision, deps_contents)
1425 if match:
1426 svn_revision = self.source_control.SVNFindRev(
1427 revision, self._GetDepotDirectory(depot))
1428 if not svn_revision:
1429 print 'Could not determine SVN revision for %s' % revision
1430 return False
1431 # Update the revision information for the given depot
1432 new_data = re.sub(deps_revision, str(svn_revision), deps_contents)
1434 # For v8_bleeding_edge revisions change V8 branch in order
1435 # to fetch bleeding edge revision.
1436 if depot == 'v8_bleeding_edge':
1437 new_data = _UpdateV8Branch(new_data)
1438 if not new_data:
1439 return False
1440 # Write changes to DEPS file
1441 WriteStringToFile(new_data, deps_file)
1442 return True
1443 except IOError, e:
1444 print 'Something went wrong while updating DEPS file. [%s]' % e
1445 return False
1447 def CreateDEPSPatch(self, depot, revision):
1448 """Modifies DEPS and returns diff as text.
1450 Args:
1451 depot: Current depot being bisected.
1452 revision: A git hash revision of the dependency repository.
1454 Returns:
1455 A tuple with the git hash of the chromium revision and the DEPS patch text.
1456 """
1457 deps_file_path = os.path.join(self.src_cwd, bisect_utils.FILE_DEPS)
1458 if not os.path.exists(deps_file_path):
1459 raise RuntimeError('DEPS file does not exist. [%s]' % deps_file_path)
1460 # Get current chromium revision (git hash).
1461 cmd = ['rev-parse', 'HEAD']
1462 chromium_sha = bisect_utils.CheckRunGit(cmd).strip()
1463 if not chromium_sha:
1464 raise RuntimeError('Failed to determine Chromium revision for %s' %
1465 revision)
1466 if ('chromium' in DEPOT_DEPS_NAME[depot]['from'] or
1467 'v8' in DEPOT_DEPS_NAME[depot]['from']):
1468 # Checkout DEPS file for the current chromium revision.
1469 if self.source_control.CheckoutFileAtRevision(
1470 bisect_utils.FILE_DEPS, chromium_sha, cwd=self.src_cwd):
1471 if self.UpdateDeps(revision, depot, deps_file_path):
1472 diff_command = [
1473 'diff',
1474 '--src-prefix=src/',
1475 '--dst-prefix=src/',
1476 '--no-ext-diff',
1477 bisect_utils.FILE_DEPS,
1478 ]
1479 diff_text = bisect_utils.CheckRunGit(diff_command, cwd=self.src_cwd)
1480 return (chromium_sha, ChangeBackslashToSlashInPatch(diff_text))
1481 else:
1482 raise RuntimeError(
1483 'Failed to update DEPS file for chromium: [%s]' % chromium_sha)
1484 else:
1485 raise RuntimeError(
1486 'DEPS checkout failed for chromium revision: [%s]' % chromium_sha)
1487 return (None, None)
1489 def BuildCurrentRevision(self, depot, revision=None):
1490 """Builds chrome and performance_ui_tests on the current revision.
1492 Returns:
1493 True if the build was successful.
1494 """
1495 if self.opts.debug_ignore_build:
1496 return True
1497 cwd = os.getcwd()
1498 os.chdir(self.src_cwd)
1499 # Fetch build archive for the given revision from the cloud storage when
1500 # the storage bucket is passed.
1501 if self.IsDownloadable(depot) and revision:
1502 deps_patch = None
1503 if depot != 'chromium':
1504 # Create a DEPS patch with new revision for dependency repository.
1505 revision, deps_patch = self.CreateDEPSPatch(depot, revision)
1506 if self.DownloadCurrentBuild(revision, patch=deps_patch):
1507 os.chdir(cwd)
1508 if deps_patch:
1509 # Reverts the changes to DEPS file.
1510 self.source_control.CheckoutFileAtRevision(
1511 bisect_utils.FILE_DEPS, revision, cwd=self.src_cwd)
1512 return True
1513 return False
1515 # This code is executed when bisect bots build binaries locally.
1516 build_success = self.builder.Build(depot, self.opts)
1517 os.chdir(cwd)
1518 return build_success
1520 def RunGClientHooks(self):
1521 """Runs gclient with runhooks command.
1523 Returns:
1524 True if gclient reports no errors.
1525 """
1526 if self.opts.debug_ignore_build:
1527 return True
1528 return not bisect_utils.RunGClient(['runhooks'], cwd=self.src_cwd)
1530 def _IsBisectModeUsingMetric(self):
1531 return self.opts.bisect_mode in [BISECT_MODE_MEAN, BISECT_MODE_STD_DEV]
1533 def _IsBisectModeReturnCode(self):
1534 return self.opts.bisect_mode in [BISECT_MODE_RETURN_CODE]
1536 def _IsBisectModeStandardDeviation(self):
1537 return self.opts.bisect_mode in [BISECT_MODE_STD_DEV]
1539 def GetCompatibleCommand(self, command_to_run, revision, depot):
1540 # Prior to crrev.com/274857, *only* android-chromium-testshell works.
1541 # Then, until crrev.com/276628, *both* android-chromium-testshell and
1542 # android-chrome-shell work. After rev 276628, *only* android-chrome-shell
1543 # works. This script should handle these cases and set the appropriate
1544 # browser type based on the revision.
1545 if self.opts.target_platform in ['android']:
1546 # When it's a third_party depot, get the chromium revision.
1547 if depot != 'chromium':
1548 revision = bisect_utils.CheckRunGit(
1549 ['rev-parse', 'HEAD'], cwd=self.src_cwd).strip()
1550 svn_revision = self.source_control.SVNFindRev(revision, cwd=self.src_cwd)
1551 if not svn_revision:
1552 return command_to_run
1553 cmd_re = re.compile('--browser=(?P<browser_type>\S+)')
1554 matches = cmd_re.search(command_to_run)
1555 if bisect_utils.IsStringInt(svn_revision) and matches:
1556 cmd_browser = matches.group('browser_type')
1557 if svn_revision <= 274857 and cmd_browser == 'android-chrome-shell':
1558 return command_to_run.replace(cmd_browser,
1559 'android-chromium-testshell')
1560 elif (svn_revision >= 276628 and
1561 cmd_browser == 'android-chromium-testshell'):
1562 return command_to_run.replace(cmd_browser,
1563 'android-chrome-shell')
1564 return command_to_run
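# Editor's note -- illustrative example for GetCompatibleCommand above; the
# command string and revisions are hypothetical:
#   command: 'tools/perf/run_benchmark --browser=android-chrome-shell sunspider'
#   resolved SVN revision 270000 (<= 274857): the flag is rewritten to
#     --browser=android-chromium-testshell.
#   resolved SVN revision 280000 (>= 276628) with
#     --browser=android-chromium-testshell: rewritten to
#     --browser=android-chrome-shell.
#   Revisions in between leave the command untouched.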
1566 def RunPerformanceTestAndParseResults(
1567 self, command_to_run, metric, reset_on_first_run=False,
1568 upload_on_last_run=False, results_label=None):
1569 """Runs a performance test on the current revision and parses the results.
1571 Args:
1572 command_to_run: The command to be run to execute the performance test.
1573 metric: The metric to parse out from the results of the performance test.
1574 This is the result chart name and trace name, separated by slash.
1575 May be None for perf try jobs.
1576 reset_on_first_run: If True, pass the flag --reset-results on first run.
1577 upload_on_last_run: If True, pass the flag --upload-results on last run.
1578 results_label: A value for the option flag --results-label.
1579 The arguments reset_on_first_run, upload_on_last_run and results_label
1580 are all ignored if the test is not a Telemetry test.
1582 Returns:
1583 (values dict, 0) if --debug_ignore_perf_test was passed.
1584 (values dict, 0, test output) if the test was run successfully.
1585 (error message, -1) if the test couldn't be run.
1586 (error message, -1, test output) if the test ran but there was an error.
1588 success_code, failure_code = 0, -1
1590 if self.opts.debug_ignore_perf_test:
1591 fake_results = {
1592 'mean': 0.0,
1593 'std_err': 0.0,
1594 'std_dev': 0.0,
1595 'values': [0.0]
1596 }
1597 return (fake_results, success_code)
1599 # On Windows, set posix=False so that Windows paths are parsed correctly.
1600 # With the default posix=True, the path separators '\' and '\\' are replaced
1601 # by '' (see http://bugs.python.org/issue1724822).
1602 args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
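# Editor's note -- illustrative sketch, not part of the original script; the
# command and path below are hypothetical. It only shows how the posix flag
# changes shlex parsing of a Windows-style path:
#   >>> import shlex
#   >>> shlex.split(r'perf_test --profile-dir=C:\tmp\data', posix=True)
#   ['perf_test', '--profile-dir=C:tmpdata']        # backslashes consumed
#   >>> shlex.split(r'perf_test --profile-dir=C:\tmp\data', posix=False)
#   ['perf_test', '--profile-dir=C:\\tmp\\data']    # path preserved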
1604 if not _GenerateProfileIfNecessary(args):
1605 err_text = 'Failed to generate profile for performance test.'
1606 return (err_text, failure_code)
1608 # If running a Telemetry test for Chrome OS, insert the remote IP and
1609 # identity parameters.
1610 is_telemetry = bisect_utils.IsTelemetryCommand(command_to_run)
1611 if self.opts.target_platform == 'cros' and is_telemetry:
1612 args.append('--remote=%s' % self.opts.cros_remote_ip)
1613 args.append('--identity=%s' % bisect_utils.CROS_TEST_KEY_PATH)
1615 start_time = time.time()
1617 metric_values = []
1618 output_of_all_runs = ''
1619 for i in xrange(self.opts.repeat_test_count):
1620 # The return code can be ignored here, since failing tests simply return non-zero.
1621 current_args = copy.copy(args)
1622 if is_telemetry:
1623 if i == 0 and reset_on_first_run:
1624 current_args.append('--reset-results')
1625 elif i == self.opts.repeat_test_count - 1 and upload_on_last_run:
1626 current_args.append('--upload-results')
1627 if results_label:
1628 current_args.append('--results-label=%s' % results_label)
1629 try:
1630 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
1631 current_args, cwd=self.src_cwd)
1632 except OSError, e:
1633 if e.errno == errno.ENOENT:
1634 err_text = ('Something went wrong running the performance test. '
1635 'Please review the command line:\n\n')
1636 if 'src/' in ' '.join(args):
1637 err_text += ('Check that you haven\'t accidentally specified a '
1638 'path with src/ in the command.\n\n')
1639 err_text += ' '.join(args)
1640 err_text += '\n'
1642 return (err_text, failure_code)
1643 raise
1645 output_of_all_runs += output
1646 if self.opts.output_buildbot_annotations:
1647 print output
1649 if metric and self._IsBisectModeUsingMetric():
1650 metric_values += _ParseMetricValuesFromOutput(metric, output)
1651 # If we're bisecting on a metric (i.e., changes in the mean or
1652 # standard deviation) and no metric values are produced, bail out.
1653 if not metric_values:
1654 break
1655 elif self._IsBisectModeReturnCode():
1656 metric_values.append(return_code)
1658 elapsed_minutes = (time.time() - start_time) / 60.0
1659 if elapsed_minutes >= self.opts.max_time_minutes:
1660 break
1662 if metric and len(metric_values) == 0:
1663 err_text = 'Metric %s was not found in the test output.' % metric
1664 # TODO(qyearsley): Consider also getting and displaying a list of metrics
1665 # that were found in the output here.
1666 return (err_text, failure_code, output_of_all_runs)
1668 # If we're bisecting on return codes, we're really just looking for zero vs
1669 # non-zero.
1670 values = {}
1671 if self._IsBisectModeReturnCode():
1672 # If any of the return codes is non-zero, output 1.
1673 overall_return_code = 0 if (
1674 all(current_value == 0 for current_value in metric_values)) else 1
1676 values = {
1677 'mean': overall_return_code,
1678 'std_err': 0.0,
1679 'std_dev': 0.0,
1680 'values': metric_values,
1681 }
1683 print 'Results of performance test: Command returned with %d' % (
1684 overall_return_code)
1685 print
1686 elif metric:
1687 # Need to get the average value if there were multiple values.
1688 truncated_mean = math_utils.TruncatedMean(
1689 metric_values, self.opts.truncate_percent)
1690 standard_err = math_utils.StandardError(metric_values)
1691 standard_dev = math_utils.StandardDeviation(metric_values)
1693 if self._IsBisectModeStandardDeviation():
1694 metric_values = [standard_dev]
1696 values = {
1697 'mean': truncated_mean,
1698 'std_err': standard_err,
1699 'std_dev': standard_dev,
1700 'values': metric_values,
1701 }
1703 print 'Results of performance test: %12f %12f' % (
1704 truncated_mean, standard_err)
1705 print
1706 return (values, success_code, output_of_all_runs)
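# Editor's sketch (not part of the original script): a plain-Python
# approximation of the truncated-mean aggregation performed above. The real
# helper is math_utils.TruncatedMean in auto_bisect/; its exact signature and
# edge-case handling may differ, and the values below are hypothetical.
def _example_truncated_mean(values, truncate_percent):
  """Drops the highest/lowest truncate_percent of values, then averages."""
  values = sorted(values)
  num_to_drop = int(len(values) * truncate_percent / 100.0)
  trimmed = values[num_to_drop:len(values) - num_to_drop] or values
  return float(sum(trimmed)) / len(trimmed)

# With repeat_test_count=4 and truncate_percent=25, a run such as
# [10.0, 11.0, 12.0, 50.0] drops 10.0 and 50.0 and reports a mean of 11.5,
# which damps the effect of a single noisy iteration.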
1708 def FindAllRevisionsToSync(self, revision, depot):
1709 """Finds all dependent revisions and depots that need to be synced.
1711 For example, skia is broken up into 3 git mirrors: skia/src,
1712 skia/gyp, and skia/include. To sync skia/src properly, one has to find
1713 the proper revisions in skia/gyp and skia/include.
1715 This is only useful in the git workflow, as an SVN depot may be split into
1716 multiple mirrors.
1718 Args:
1719 revision: The revision to sync to.
1720 depot: The depot in use at the moment (probably skia).
1722 Returns:
1723 A list of [depot, revision] pairs that need to be synced.
1725 revisions_to_sync = [[depot, revision]]
1727 is_base = ((depot == 'chromium') or (depot == 'cros') or
1728 (depot == 'android-chrome'))
1730 # Some SVN depots were split into multiple git depots, so we need to
1731 # figure out for each mirror which git revision to grab. There's no
1732 # guarantee that the SVN revision will exist for each of the dependent
1733 # depots, so we have to grep the git logs and grab the next earlier one.
1734 if (not is_base
1735 and DEPOT_DEPS_NAME[depot]['depends']
1736 and self.source_control.IsGit()):
1737 svn_rev = self.source_control.SVNFindRev(revision)
1739 for d in DEPOT_DEPS_NAME[depot]['depends']:
1740 self.ChangeToDepotWorkingDirectory(d)
1742 dependant_rev = self.source_control.ResolveToRevision(
1743 svn_rev, d, DEPOT_DEPS_NAME, -1000)
1745 if dependant_rev:
1746 revisions_to_sync.append([d, dependant_rev])
1748 num_resolved = len(revisions_to_sync)
1749 num_needed = len(DEPOT_DEPS_NAME[depot]['depends'])
1751 self.ChangeToDepotWorkingDirectory(depot)
1753 if (num_resolved - 1) != num_needed:
1754 return None
1756 return revisions_to_sync
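# Editor's note -- illustrative return value for FindAllRevisionsToSync; the
# hashes are hypothetical. For a skia bisect the result might look like:
#   [['skia/src', '18d2f3a...'],
#    ['skia/gyp', '9c04b72...'],
#    ['skia/include', 'f11e0c5...']]
# i.e. the depot being bisected plus one matching revision per dependent mirror.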
1758 def PerformPreBuildCleanup(self):
1759 """Performs cleanup between runs."""
1760 print 'Cleaning up between runs.'
1761 print
1763 # Leaving these .pyc files around between runs may disrupt some perf tests.
1764 for (path, _, files) in os.walk(self.src_cwd):
1765 for cur_file in files:
1766 if cur_file.endswith('.pyc'):
1767 path_to_file = os.path.join(path, cur_file)
1768 os.remove(path_to_file)
1770 def PerformWebkitDirectoryCleanup(self, revision):
1771 """Cleans up the Webkit directory before syncing another revision.
1773 If the script is switching between Blink and WebKit during bisect,
1774 it's faster to just delete the directory rather than leave it up to git
1775 to sync.
1777 Returns:
1778 True if successful.
1780 if not self.source_control.CheckoutFileAtRevision(
1781 bisect_utils.FILE_DEPS_GIT, revision, cwd=self.src_cwd):
1782 return False
1784 cwd = os.getcwd()
1785 os.chdir(self.src_cwd)
1787 is_blink = bisect_utils.IsDepsFileBlink()
1789 os.chdir(cwd)
1791 if not self.source_control.RevertFileToHead(
1792 bisect_utils.FILE_DEPS_GIT):
1793 return False
1795 if self.was_blink != is_blink:
1796 self.was_blink = is_blink
1797 # Removes third_party/Webkit directory.
1798 return bisect_utils.RemoveThirdPartyDirectory('Webkit')
1799 return True
1801 def PerformCrosChrootCleanup(self):
1802 """Deletes the chroot.
1804 Returns:
1805 True if successful.
1807 cwd = os.getcwd()
1808 self.ChangeToDepotWorkingDirectory('cros')
1809 cmd = [bisect_utils.CROS_SDK_PATH, '--delete']
1810 return_code = bisect_utils.RunProcess(cmd)
1811 os.chdir(cwd)
1812 return not return_code
1814 def CreateCrosChroot(self):
1815 """Creates a new chroot.
1817 Returns:
1818 True if successful.
1820 cwd = os.getcwd()
1821 self.ChangeToDepotWorkingDirectory('cros')
1822 cmd = [bisect_utils.CROS_SDK_PATH, '--create']
1823 return_code = bisect_utils.RunProcess(cmd)
1824 os.chdir(cwd)
1825 return not return_code
1827 def PerformPreSyncCleanup(self, revision, depot):
1828 """Performs any necessary cleanup before syncing.
1830 Returns:
1831 True if successful.
1833 if depot == 'chromium' or depot == 'android-chrome':
1834 # Removes third_party/libjingle. At some point, libjingle was causing
1835 # issues syncing when using the git workflow (crbug.com/266324).
1836 os.chdir(self.src_cwd)
1837 if not bisect_utils.RemoveThirdPartyDirectory('libjingle'):
1838 return False
1839 # Removes third_party/skia. At some point, skia was causing
1840 # issues syncing when using the git workflow (crbug.com/377951).
1841 if not bisect_utils.RemoveThirdPartyDirectory('skia'):
1842 return False
1843 if depot == 'chromium':
1844 # The fast WebKit cleanup doesn't work for android-chrome.
1845 # The switch from WebKit to Blink that this deals with happened quite a
1846 # long time ago, so this is unlikely to be a problem.
1847 return self.PerformWebkitDirectoryCleanup(revision)
1848 elif depot == 'cros':
1849 return self.PerformCrosChrootCleanup()
1850 return True
1852 def RunPostSync(self, depot):
1853 """Performs any work after syncing.
1855 Returns:
1856 True if successful.
1858 if self.opts.target_platform == 'android':
1859 if not builder.SetupAndroidBuildEnvironment(self.opts,
1860 path_to_src=self.src_cwd):
1861 return False
1863 if depot == 'cros':
1864 return self.CreateCrosChroot()
1865 else:
1866 return self.RunGClientHooks()
1867 return True
1869 def ShouldSkipRevision(self, depot, revision):
1870 """Checks whether a particular revision can be safely skipped.
1872 Some commits can be safely skipped (such as a DEPS roll); since the tool
1873 is git-based, those changes would have no effect.
1875 Args:
1876 depot: The depot being bisected.
1877 revision: Current revision we're synced to.
1879 Returns:
1880 True if we should skip building/testing this revision.
1882 if depot == 'chromium':
1883 if self.source_control.IsGit():
1884 cmd = ['diff-tree', '--no-commit-id', '--name-only', '-r', revision]
1885 output = bisect_utils.CheckRunGit(cmd)
1887 files = output.splitlines()
1889 if len(files) == 1 and files[0] == 'DEPS':
1890 return True
1892 return False
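# Editor's note -- illustrative example for ShouldSkipRevision above; the SHA
# is hypothetical. For a DEPS-only roll:
#   $ git diff-tree --no-commit-id --name-only -r abc1234
#   DEPS
# A single changed file named DEPS means the commit only rolled dependency
# revisions, so building and testing it in the chromium depot is skipped.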
1894 def SyncBuildAndRunRevision(self, revision, depot, command_to_run, metric,
1895 skippable=False):
1896 """Performs a full sync/build/run of the specified revision.
1898 Args:
1899 revision: The revision to sync to.
1900 depot: The depot that's being used at the moment (src, webkit, etc.)
1901 command_to_run: The command to execute the performance test.
1902 metric: The performance metric being tested.
1904 Returns:
1905 On success, a tuple containing the results of the performance test.
1906 Otherwise, a tuple with the error message.
1908 sync_client = None
1909 if depot == 'chromium' or depot == 'android-chrome':
1910 sync_client = 'gclient'
1911 elif depot == 'cros':
1912 sync_client = 'repo'
1914 revisions_to_sync = self.FindAllRevisionsToSync(revision, depot)
1916 if not revisions_to_sync:
1917 return ('Failed to resolve dependent depots.', BUILD_RESULT_FAIL)
1919 if not self.PerformPreSyncCleanup(revision, depot):
1920 return ('Failed to perform pre-sync cleanup.', BUILD_RESULT_FAIL)
1922 success = True
1924 if not self.opts.debug_ignore_sync:
1925 for r in revisions_to_sync:
1926 self.ChangeToDepotWorkingDirectory(r[0])
1928 if sync_client:
1929 self.PerformPreBuildCleanup()
1931 # If you're using gclient to sync, you need to specify the depot you
1932 # want so that all the dependencies sync properly as well.
1933 # i.e. gclient sync src@<SHA1>
1934 current_revision = r[1]
1935 if sync_client == 'gclient':
1936 current_revision = '%s@%s' % (DEPOT_DEPS_NAME[depot]['src'],
1937 current_revision)
1938 if not self.source_control.SyncToRevision(current_revision,
1939 sync_client):
1940 success = False
1942 break
1944 if success:
1945 success = self.RunPostSync(depot)
1946 if success:
1947 if skippable and self.ShouldSkipRevision(depot, revision):
1948 return ('Skipped revision: [%s]' % str(revision),
1949 BUILD_RESULT_SKIPPED)
1951 start_build_time = time.time()
1952 if self.BuildCurrentRevision(depot, revision):
1953 after_build_time = time.time()
1954 # Hack: adjust the command for browser-type changes at certain revisions.
1955 command_to_run = self.GetCompatibleCommand(
1956 command_to_run, revision, depot)
1957 results = self.RunPerformanceTestAndParseResults(command_to_run,
1958 metric)
1959 # Restore build output directory once the tests are done, to avoid
1960 # any discrepancies.
1961 if self.IsDownloadable(depot) and revision:
1962 self.BackupOrRestoreOutputdirectory(restore=True)
1964 if results[1] == 0:
1965 external_revisions = self._Get3rdPartyRevisions(depot)
1967 if external_revisions is not None:
1968 return (results[0], results[1], external_revisions,
1969 time.time() - after_build_time, after_build_time -
1970 start_build_time)
1971 else:
1972 return ('Failed to parse DEPS file for external revisions.',
1973 BUILD_RESULT_FAIL)
1974 else:
1975 return results
1976 else:
1977 return ('Failed to build revision: [%s]' % str(revision),
1978 BUILD_RESULT_FAIL)
1979 else:
1980 return ('Failed to run [gclient runhooks].', BUILD_RESULT_FAIL)
1981 else:
1982 return ('Failed to sync revision: [%s]' % str(revision),
1983 BUILD_RESULT_FAIL)
1985 def _CheckIfRunPassed(self, current_value, known_good_value, known_bad_value):
1986 """Given known good and bad values, decide if the current_value passed
1987 or failed.
1989 Args:
1990 current_value: The value of the metric being checked.
1991 known_bad_value: The reference value for a "failed" run.
1992 known_good_value: The reference value for a "passed" run.
1994 Returns:
1995 True if the current_value is closer to the known_good_value than the
1996 known_bad_value.
1998 if self.opts.bisect_mode == BISECT_MODE_STD_DEV:
1999 dist_to_good_value = abs(current_value['std_dev'] -
2000 known_good_value['std_dev'])
2001 dist_to_bad_value = abs(current_value['std_dev'] -
2002 known_bad_value['std_dev'])
2003 else:
2004 dist_to_good_value = abs(current_value['mean'] - known_good_value['mean'])
2005 dist_to_bad_value = abs(current_value['mean'] - known_bad_value['mean'])
2007 return dist_to_good_value < dist_to_bad_value
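# Editor's note -- worked example for _CheckIfRunPassed (hypothetical numbers,
# mean mode): known good mean = 100.0, known bad mean = 110.0, current mean =
# 102.0. dist_to_good = |102.0 - 100.0| = 2.0 and dist_to_bad =
# |102.0 - 110.0| = 8.0; since 2.0 < 8.0 the run is classified as passed
# (closer to the known-good value).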
2009 def _GetDepotDirectory(self, depot_name):
2010 if depot_name == 'chromium':
2011 return self.src_cwd
2012 elif depot_name == 'cros':
2013 return self.cros_cwd
2014 elif depot_name in DEPOT_NAMES:
2015 return self.depot_cwd[depot_name]
2016 else:
2017 assert False, ('Unknown depot [ %s ] encountered. Possibly a new one '
2018 'was added without proper support?' % depot_name)
2020 def ChangeToDepotWorkingDirectory(self, depot_name):
2021 """Given a depot, changes to the appropriate working directory.
2023 Args:
2024 depot_name: The name of the depot (see DEPOT_NAMES).
2026 os.chdir(self._GetDepotDirectory(depot_name))
2028 def _FillInV8BleedingEdgeInfo(self, min_revision_data, max_revision_data):
2029 r1 = self._GetNearestV8BleedingEdgeFromTrunk(min_revision_data['revision'],
2030 search_forward=True)
2031 r2 = self._GetNearestV8BleedingEdgeFromTrunk(max_revision_data['revision'],
2032 search_forward=False)
2033 min_revision_data['external']['v8_bleeding_edge'] = r1
2034 max_revision_data['external']['v8_bleeding_edge'] = r2
2036 if (not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2037 min_revision_data['revision'])
2038 or not self._GetV8BleedingEdgeFromV8TrunkIfMappable(
2039 max_revision_data['revision'])):
2040 self.warnings.append(
2041 'Trunk revisions in V8 did not map directly to bleeding_edge. '
2042 'Attempted to expand the range to find V8 rolls which did map '
2043 'directly to bleeding_edge revisions, but results might not be '
2044 'valid.')
2046 def _FindNextDepotToBisect(
2047 self, current_depot, min_revision_data, max_revision_data):
2048 """Decides which depot the script should dive into next (if any).
2050 Args:
2051 current_depot: Current depot being bisected.
2052 min_revision_data: Data about the earliest revision in the bisect range.
2053 max_revision_data: Data about the latest revision in the bisect range.
2055 Returns:
2056 Name of the depot to bisect next, or None.
2058 external_depot = None
2059 for next_depot in DEPOT_NAMES:
2060 if 'platform' in DEPOT_DEPS_NAME[next_depot]:
2061 if DEPOT_DEPS_NAME[next_depot]['platform'] != os.name:
2062 continue
2064 if not (DEPOT_DEPS_NAME[next_depot]['recurse']
2065 and min_revision_data['depot']
2066 in DEPOT_DEPS_NAME[next_depot]['from']):
2067 continue
2069 if current_depot == 'v8':
2070 # We grab the bleeding_edge info here rather than earlier because we
2071 # finally have the revision range. From that we can search forwards and
2072 # backwards to try to match trunk revisions to bleeding_edge.
2073 self._FillInV8BleedingEdgeInfo(min_revision_data, max_revision_data)
2075 if (min_revision_data['external'].get(next_depot) ==
2076 max_revision_data['external'].get(next_depot)):
2077 continue
2079 if (min_revision_data['external'].get(next_depot) and
2080 max_revision_data['external'].get(next_depot)):
2081 external_depot = next_depot
2082 break
2084 return external_depot
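# Editor's note -- illustrative example for _FindNextDepotToBisect; the depot
# and hashes are hypothetical. If
#   min_revision_data['external'] == {'webkit': 'aaa111...'}
#   max_revision_data['external'] == {'webkit': 'bbb222...'}
# then the WebKit revision changed somewhere inside the chromium range, so
# 'webkit' is returned and the bisect recurses into that depot. If both ends
# report the same WebKit revision, that depot is skipped.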
2086 def PrepareToBisectOnDepot(
2087 self, current_depot, end_revision, start_revision, previous_revision):
2088 """Changes to the appropriate directory and gathers a list of revisions
2089 to bisect between |start_revision| and |end_revision|.
2091 Args:
2092 current_depot: The depot we want to bisect.
2093 end_revision: End of the revision range.
2094 start_revision: Start of the revision range.
2095 previous_revision: The last revision we synced to on |previous_depot|.
2097 Returns:
2098 A list containing the revisions between |start_revision| and
2099 |end_revision| inclusive.
2101 # Change into working directory of external library to run
2102 # subsequent commands.
2103 self.ChangeToDepotWorkingDirectory(current_depot)
2105 # V8 (and possibly others) is merged in periodically. Bisecting
2106 # this directory directly won't give much good info.
2107 if 'custom_deps' in DEPOT_DEPS_NAME[current_depot]:
2108 config_path = os.path.join(self.src_cwd, '..')
2109 if bisect_utils.RunGClientAndCreateConfig(self.opts,
2110 DEPOT_DEPS_NAME[current_depot]['custom_deps'], cwd=config_path):
2111 return []
2112 if bisect_utils.RunGClient(
2113 ['sync', '--revision', previous_revision], cwd=self.src_cwd):
2114 return []
2116 if current_depot == 'v8_bleeding_edge':
2117 self.ChangeToDepotWorkingDirectory('chromium')
2119 shutil.move('v8', 'v8.bak')
2120 shutil.move('v8_bleeding_edge', 'v8')
2122 self.cleanup_commands.append(['mv', 'v8', 'v8_bleeding_edge'])
2123 self.cleanup_commands.append(['mv', 'v8.bak', 'v8'])
2125 self.depot_cwd['v8_bleeding_edge'] = os.path.join(self.src_cwd, 'v8')
2126 self.depot_cwd['v8'] = os.path.join(self.src_cwd, 'v8.bak')
2128 self.ChangeToDepotWorkingDirectory(current_depot)
2130 depot_revision_list = self.GetRevisionList(current_depot,
2131 end_revision,
2132 start_revision)
2134 self.ChangeToDepotWorkingDirectory('chromium')
2136 return depot_revision_list
2138 def GatherReferenceValues(self, good_rev, bad_rev, cmd, metric, target_depot):
2139 """Gathers reference values by running the performance tests on the
2140 known good and bad revisions.
2142 Args:
2143 good_rev: The last known good revision where the performance regression
2144 has not occurred yet.
2145 bad_rev: A revision where the performance regression has already occurred.
2146 cmd: The command to execute the performance test.
2147 metric: The metric being tested for regression.
2149 Returns:
2150 A tuple with the results of building and running each revision.
2152 bad_run_results = self.SyncBuildAndRunRevision(
2153 bad_rev, target_depot, cmd, metric)
2155 good_run_results = None
2157 if not bad_run_results[1]:
2158 good_run_results = self.SyncBuildAndRunRevision(
2159 good_rev, target_depot, cmd, metric)
2161 return (bad_run_results, good_run_results)
2163 def PrintRevisionsToBisectMessage(self, revision_list, depot):
2164 if self.opts.output_buildbot_annotations:
2165 step_name = 'Bisection Range: [%s - %s]' % (
2166 revision_list[-1], revision_list[0])
2167 bisect_utils.OutputAnnotationStepStart(step_name)
2169 print
2170 print 'Revisions to bisect on [%s]:' % depot
2171 for revision_id in revision_list:
2172 print ' -> %s' % (revision_id, )
2173 print
2175 if self.opts.output_buildbot_annotations:
2176 bisect_utils.OutputAnnotationStepClosed()
2178 def NudgeRevisionsIfDEPSChange(self, bad_revision, good_revision):
2179 """Checks to see if changes to DEPS file occurred, and that the revision
2180 range also includes the change to .DEPS.git. If it doesn't, attempts to
2181 expand the revision range to include it.
2183 Args:
2184 bad_revision: First known bad revision.
2185 good_revision: Last known good revision.
2187 Returns:
2188 A tuple with the new bad and good revisions.
2190 if self.source_control.IsGit() and self.opts.target_platform == 'chromium':
2191 changes_to_deps = self.source_control.QueryFileRevisionHistory(
2192 'DEPS', good_revision, bad_revision)
2194 if changes_to_deps:
2195 # DEPS file was changed, search from the oldest change to DEPS file to
2196 # bad_revision to see if there are matching .DEPS.git changes.
2197 oldest_deps_change = changes_to_deps[-1]
2198 changes_to_gitdeps = self.source_control.QueryFileRevisionHistory(
2199 bisect_utils.FILE_DEPS_GIT, oldest_deps_change, bad_revision)
2201 if len(changes_to_deps) != len(changes_to_gitdeps):
2202 # Grab the timestamp of the last DEPS change.
2203 cmd = ['log', '--format=%ct', '-1', changes_to_deps[0]]
2204 output = bisect_utils.CheckRunGit(cmd)
2205 commit_time = int(output)
2207 # Try looking for a commit that touches the .DEPS.git file in the
2208 # next 15 minutes after the DEPS file change.
2209 cmd = ['log', '--format=%H', '-1',
2210 '--before=%d' % (commit_time + 900), '--after=%d' % commit_time,
2211 'origin/master', bisect_utils.FILE_DEPS_GIT]
2212 output = bisect_utils.CheckRunGit(cmd)
2213 output = output.strip()
2214 if output:
2215 self.warnings.append('Detected change to DEPS and modified '
2216 'revision range to include change to .DEPS.git')
2217 return (output, good_revision)
2218 else:
2219 self.warnings.append('Detected change to DEPS but couldn\'t find '
2220 'matching change to .DEPS.git')
2221 return (bad_revision, good_revision)
2223 def CheckIfRevisionsInProperOrder(
2224 self, target_depot, good_revision, bad_revision):
2225 """Checks that |good_revision| is an earlier revision than |bad_revision|.
2227 Args:
2228 good_revision: Number/tag of the known good revision.
2229 bad_revision: Number/tag of the known bad revision.
2231 Returns:
2232 True if the revisions are in the proper order (good earlier than bad).
2234 if self.source_control.IsGit() and target_depot != 'cros':
2235 cmd = ['log', '--format=%ct', '-1', good_revision]
2236 cwd = self._GetDepotDirectory(target_depot)
2238 output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
2239 good_commit_time = int(output)
2241 cmd = ['log', '--format=%ct', '-1', bad_revision]
2242 output = bisect_utils.CheckRunGit(cmd, cwd=cwd)
2243 bad_commit_time = int(output)
2245 return good_commit_time <= bad_commit_time
2246 else:
2247 # CrOS and SVN use integers.
2248 return int(good_revision) <= int(bad_revision)
2250 def CanPerformBisect(self, revision_to_check):
2251 """Checks whether a given revision is bisectable.
2253 Note: At present this only checks whether a given revision is bisectable
2254 on Android bots (see crbug.com/385324).
2256 Args:
2257 revision_to_check: Known good revision.
2259 Returns:
2260 A dictionary containing the field "error" if the revision is not
2261 bisectable, otherwise None.
2263 if self.opts.target_platform == 'android':
2264 revision_to_check = self.source_control.SVNFindRev(revision_to_check)
2265 if (bisect_utils.IsStringInt(revision_to_check)
2266 and revision_to_check < 265549):
2267 return {'error': (
2268 'Bisect cannot continue for the given revision range.\n'
2269 'It is impossible to bisect Android regressions '
2270 'prior to r265549, the revision that allows the bisect bot to '
2271 'rely on Telemetry to install the APK of the most recently '
2272 'built local ChromeShell (see crbug.com/385324).\n'
2273 'Please try bisecting revisions greater than or equal to r265549.')}
2274 return None
2276 def Run(self, command_to_run, bad_revision_in, good_revision_in, metric):
2277 """Given known good and bad revisions, run a binary search on all
2278 intermediate revisions to determine the CL where the performance regression
2279 occurred.
2281 Args:
2282 command_to_run: The command to execute the performance test.
2283 bad_revision_in: Number/tag of the known bad revision.
2284 good_revision_in: Number/tag of the known good revision.
2285 metric: The performance metric to monitor.
2287 Returns:
2288 A dict with 2 members, 'revision_data' and 'error'. On success,
2289 'revision_data' will contain a dict mapping revision ids to
2290 data about that revision. Each piece of revision data consists of a
2291 dict with the following keys:
2293 'passed': Represents whether the performance test was successful at
2294 that revision. Possible values include: 1 (passed), 0 (failed),
2295 '?' (skipped), 'F' (build failed).
2296 'depot': The depot that this revision is from (i.e. WebKit)
2297 'external': If the revision is a 'src' revision, 'external' contains
2298 the revisions of each of the external libraries.
2299 'sort': A sort value for sorting the dict in order of commits.
2301 For example:
2302 {
2303 'error':None,
2304 'revision_data':
2305 {
2306 'CL #1':
2307 {
2308 'passed': False,
2309 'depot': 'chromium',
2310 'external': None,
2311 'sort': 0
2312 }
2313 }
2314 }
2316 If an error occurred, the 'error' field will contain the message and
2317 'revision_data' will be empty.
2319 results = {
2320 'revision_data' : {},
2321 'error' : None,
2322 }
2324 # Choose depot to bisect first
2325 target_depot = 'chromium'
2326 if self.opts.target_platform == 'cros':
2327 target_depot = 'cros'
2328 elif self.opts.target_platform == 'android-chrome':
2329 target_depot = 'android-chrome'
2331 cwd = os.getcwd()
2332 self.ChangeToDepotWorkingDirectory(target_depot)
2334 # If they passed SVN revisions, we can try to match them to git SHA1 hashes.
2335 bad_revision = self.source_control.ResolveToRevision(
2336 bad_revision_in, target_depot, DEPOT_DEPS_NAME, 100)
2337 good_revision = self.source_control.ResolveToRevision(
2338 good_revision_in, target_depot, DEPOT_DEPS_NAME, -100)
2340 os.chdir(cwd)
2342 if bad_revision is None:
2343 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % bad_revision_in
2344 return results
2346 if good_revision is None:
2347 results['error'] = 'Couldn\'t resolve [%s] to SHA1.' % good_revision_in
2348 return results
2350 # Check that they didn't accidentally swap good and bad revisions.
2351 if not self.CheckIfRevisionsInProperOrder(
2352 target_depot, good_revision, bad_revision):
2353 results['error'] = ('bad_revision < good_revision, did you swap these '
2354 'by mistake?')
2355 return results
2357 bad_revision, good_revision = self.NudgeRevisionsIfDEPSChange(
2358 bad_revision, good_revision)
2360 if self.opts.output_buildbot_annotations:
2361 bisect_utils.OutputAnnotationStepStart('Gathering Revisions')
2363 cannot_bisect = self.CanPerformBisect(good_revision)
2364 if cannot_bisect:
2365 results['error'] = cannot_bisect.get('error')
2366 return results
2368 print 'Gathering revision range for bisection.'
2369 # Retrieve a list of revisions to do bisection on.
2370 src_revision_list = self.GetRevisionList(
2371 target_depot, bad_revision, good_revision)
2373 if self.opts.output_buildbot_annotations:
2374 bisect_utils.OutputAnnotationStepClosed()
2376 if src_revision_list:
2377 # revision_data will store information about a revision such as the
2378 # depot it came from, the webkit/V8 revision at that time,
2379 # performance timing, build state, etc...
2380 revision_data = results['revision_data']
2382 # revision_list is the list we're binary searching through at the moment.
2383 revision_list = []
2385 sort_key_ids = 0
2387 for current_revision_id in src_revision_list:
2388 sort_key_ids += 1
2390 revision_data[current_revision_id] = {
2391 'value' : None,
2392 'passed' : '?',
2393 'depot' : target_depot,
2394 'external' : None,
2395 'perf_time' : 0,
2396 'build_time' : 0,
2397 'sort' : sort_key_ids,
2398 }
2399 revision_list.append(current_revision_id)
2401 min_revision = 0
2402 max_revision = len(revision_list) - 1
2404 self.PrintRevisionsToBisectMessage(revision_list, target_depot)
2406 if self.opts.output_buildbot_annotations:
2407 bisect_utils.OutputAnnotationStepStart('Gathering Reference Values')
2409 print 'Gathering reference values for bisection.'
2411 # Perform the performance tests on the good and bad revisions, to get
2412 # reference values.
2413 bad_results, good_results = self.GatherReferenceValues(good_revision,
2414 bad_revision,
2415 command_to_run,
2416 metric,
2417 target_depot)
2419 if self.opts.output_buildbot_annotations:
2420 bisect_utils.OutputAnnotationStepClosed()
2422 if bad_results[1]:
2423 results['error'] = ('An error occurred while building and running '
2424 'the \'bad\' reference value. The bisect cannot continue without '
2425 'a working \'bad\' revision to start from.\n\nError: %s' %
2426 bad_results[0])
2427 return results
2429 if good_results[1]:
2430 results['error'] = ('An error occurred while building and running '
2431 'the \'good\' reference value. The bisect cannot continue without '
2432 'a working \'good\' revision to start from.\n\nError: %s' %
2433 good_results[0])
2434 return results
2437 # We need these reference values to determine if later runs should be
2438 # classified as pass or fail.
2439 known_bad_value = bad_results[0]
2440 known_good_value = good_results[0]
2442 # Can just mark the good and bad revisions explicitly here since we
2443 # already know the results.
2444 bad_revision_data = revision_data[revision_list[0]]
2445 bad_revision_data['external'] = bad_results[2]
2446 bad_revision_data['perf_time'] = bad_results[3]
2447 bad_revision_data['build_time'] = bad_results[4]
2448 bad_revision_data['passed'] = False
2449 bad_revision_data['value'] = known_bad_value
2451 good_revision_data = revision_data[revision_list[max_revision]]
2452 good_revision_data['external'] = good_results[2]
2453 good_revision_data['perf_time'] = good_results[3]
2454 good_revision_data['build_time'] = good_results[4]
2455 good_revision_data['passed'] = True
2456 good_revision_data['value'] = known_good_value
2458 next_revision_depot = target_depot
2460 while True:
2461 if not revision_list:
2462 break
2464 min_revision_data = revision_data[revision_list[min_revision]]
2465 max_revision_data = revision_data[revision_list[max_revision]]
2467 if max_revision - min_revision <= 1:
2468 current_depot = min_revision_data['depot']
2469 if min_revision_data['passed'] == '?':
2470 next_revision_index = min_revision
2471 elif max_revision_data['passed'] == '?':
2472 next_revision_index = max_revision
2473 elif current_depot in ['android-chrome', 'cros', 'chromium', 'v8']:
2474 previous_revision = revision_list[min_revision]
2475 # If there were changes to any of the external libraries we track,
2476 # we should bisect the changes there as well.
2477 external_depot = self._FindNextDepotToBisect(
2478 current_depot, min_revision_data, max_revision_data)
2480 # If there was no change in any of the external depots, the search
2481 # is over.
2482 if not external_depot:
2483 if current_depot == 'v8':
2484 self.warnings.append('Unfortunately, V8 bisection couldn\'t '
2485 'continue any further. The script can only bisect into '
2486 'V8\'s bleeding_edge repository if both the current and '
2487 'previous revisions in trunk map directly to revisions in '
2488 'bleeding_edge.')
2489 break
2491 earliest_revision = max_revision_data['external'][external_depot]
2492 latest_revision = min_revision_data['external'][external_depot]
2494 new_revision_list = self.PrepareToBisectOnDepot(
2495 external_depot, latest_revision, earliest_revision,
2496 previous_revision)
2498 if not new_revision_list:
2499 results['error'] = ('An error occurred attempting to retrieve '
2500 'revision range: [%s..%s]' %
2501 (earliest_revision, latest_revision))
2502 return results
2504 _AddRevisionsIntoRevisionData(
2505 new_revision_list, external_depot, min_revision_data['sort'],
2506 revision_data)
2508 # Reset the bisection and perform it on the newly inserted
2509 # changelists.
2510 revision_list = new_revision_list
2511 min_revision = 0
2512 max_revision = len(revision_list) - 1
2513 sort_key_ids += len(revision_list)
2515 print ('Regression in metric %s appears to be the result of '
2516 'changes in [%s].' % (metric, external_depot))
2518 self.PrintRevisionsToBisectMessage(revision_list, external_depot)
2520 continue
2521 else:
2522 break
2523 else:
2524 next_revision_index = (int((max_revision - min_revision) / 2) +
2525 min_revision)
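# Editor's note -- standard binary-search step (hypothetical indices): with
# min_revision = 0 and max_revision = 8, the next revision tested is index 4;
# depending on whether that run passes, the window shrinks to [0, 4] or [4, 8].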
2527 next_revision_id = revision_list[next_revision_index]
2528 next_revision_data = revision_data[next_revision_id]
2529 next_revision_depot = next_revision_data['depot']
2531 self.ChangeToDepotWorkingDirectory(next_revision_depot)
2533 if self.opts.output_buildbot_annotations:
2534 step_name = 'Working on [%s]' % next_revision_id
2535 bisect_utils.OutputAnnotationStepStart(step_name)
2537 print 'Working on revision: [%s]' % next_revision_id
2539 run_results = self.SyncBuildAndRunRevision(next_revision_id,
2540 next_revision_depot,
2541 command_to_run,
2542 metric, skippable=True)
2544 # If the build is successful, check whether or not the metric
2545 # had regressed.
2546 if not run_results[1]:
2547 if len(run_results) > 2:
2548 next_revision_data['external'] = run_results[2]
2549 next_revision_data['perf_time'] = run_results[3]
2550 next_revision_data['build_time'] = run_results[4]
2552 passed_regression = self._CheckIfRunPassed(run_results[0],
2553 known_good_value,
2554 known_bad_value)
2556 next_revision_data['passed'] = passed_regression
2557 next_revision_data['value'] = run_results[0]
2559 if passed_regression:
2560 max_revision = next_revision_index
2561 else:
2562 min_revision = next_revision_index
2563 else:
2564 if run_results[1] == BUILD_RESULT_SKIPPED:
2565 next_revision_data['passed'] = 'Skipped'
2566 elif run_results[1] == BUILD_RESULT_FAIL:
2567 next_revision_data['passed'] = 'Build Failed'
2569 print run_results[0]
2571 # If the build is broken, remove it and redo the search.
2572 revision_list.pop(next_revision_index)
2574 max_revision -= 1
2576 if self.opts.output_buildbot_annotations:
2577 self._PrintPartialResults(results)
2578 bisect_utils.OutputAnnotationStepClosed()
2579 else:
2580 # Weren't able to sync and retrieve the revision range.
2581 results['error'] = ('An error occurred attempting to retrieve revision '
2582 'range: [%s..%s]' % (good_revision, bad_revision))
2584 return results
2586 def _PrintPartialResults(self, results_dict):
2587 revision_data = results_dict['revision_data']
2588 revision_data_sorted = sorted(revision_data.iteritems(),
2589 key = lambda x: x[1]['sort'])
2590 results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
2592 self._PrintTestedCommitsTable(revision_data_sorted,
2593 results_dict['first_working_revision'],
2594 results_dict['last_broken_revision'],
2595 100, final_step=False)
2597 def _ConfidenceLevelStatus(self, results_dict):
2598 if not results_dict['confidence']:
2599 return None
2600 confidence_status = 'Successful with %(level)s confidence%(warning)s.'
2601 if results_dict['confidence'] >= HIGH_CONFIDENCE:
2602 level = 'high'
2603 else:
2604 level = 'low'
2605 warning = ' and warnings'
2606 if not self.warnings:
2607 warning = ''
2608 return confidence_status % {'level': level, 'warning': warning}
2610 def _GetViewVCLinkFromDepotAndHash(self, cl, depot):
2611 info = self.source_control.QueryRevisionInfo(cl,
2612 self._GetDepotDirectory(depot))
2613 if depot and 'viewvc' in DEPOT_DEPS_NAME[depot]:
2614 try:
2615 # Format is "git-svn-id: svn://....@123456 <other data>"
2616 svn_line = [i for i in info['body'].splitlines() if 'git-svn-id:' in i]
2617 svn_revision = svn_line[0].split('@')
2618 svn_revision = svn_revision[1].split(' ')[0]
2619 return DEPOT_DEPS_NAME[depot]['viewvc'] + svn_revision
2620 except IndexError:
2621 return ''
2622 return ''
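# Editor's sketch (not part of the original script): the same git-svn-id
# parsing as _GetViewVCLinkFromDepotAndHash, shown standalone. The commit body
# and server name below are hypothetical.
def _example_parse_svn_revision(commit_body):
  """Returns the SVN revision embedded in a git-svn commit body, or ''."""
  svn_lines = [l for l in commit_body.splitlines() if 'git-svn-id:' in l]
  try:
    # 'git-svn-id: svn://server/repo/trunk/src@123456 <uuid>' -> '123456'
    return svn_lines[0].split('@')[1].split(' ')[0]
  except IndexError:
    return ''

# _example_parse_svn_revision(
#     'Fix a perf regression.\n\n'
#     'git-svn-id: svn://example.org/chrome/trunk/src@123456 0039d316') == '123456'
# The result is appended to the depot's 'viewvc' URL prefix to build the link.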
2624 def _PrintRevisionInfo(self, cl, info, depot=None):
2625 email_info = ''
2626 if not info['email'].startswith(info['author']):
2627 email_info = '\nEmail : %s' % info['email']
2628 commit_link = self._GetViewVCLinkFromDepotAndHash(cl, depot)
2629 if commit_link:
2630 commit_info = '\nLink : %s' % commit_link
2631 else:
2632 commit_info = ('\nFailed to parse SVN revision from body:\n%s' %
2633 info['body'])
2634 print RESULTS_REVISION_INFO % {
2635 'subject': info['subject'],
2636 'author': info['author'],
2637 'email_info': email_info,
2638 'commit_info': commit_info,
2639 'cl': cl,
2640 'cl_date': info['date']
2641 }
2643 def _PrintTestedCommitsHeader(self):
2644 if self.opts.bisect_mode == BISECT_MODE_MEAN:
2645 _PrintTableRow(
2646 [20, 70, 14, 12, 13],
2647 ['Depot', 'Commit SHA', 'Mean', 'Std. Error', 'State'])
2648 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
2649 _PrintTableRow(
2650 [20, 70, 14, 12, 13],
2651 ['Depot', 'Commit SHA', 'Std. Error', 'Mean', 'State'])
2652 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
2653 _PrintTableRow(
2654 [20, 70, 14, 13],
2655 ['Depot', 'Commit SHA', 'Return Code', 'State'])
2656 else:
2657 assert False, 'Invalid bisect_mode specified.'
2659 def _PrintTestedCommitsEntry(self, current_data, cl_link, state_str):
2660 if self.opts.bisect_mode == BISECT_MODE_MEAN:
2661 std_error = '+-%.02f' % current_data['value']['std_err']
2662 mean = '%.02f' % current_data['value']['mean']
2663 _PrintTableRow(
2664 [20, 70, 12, 14, 13],
2665 [current_data['depot'], cl_link, mean, std_error, state_str])
2666 elif self.opts.bisect_mode == BISECT_MODE_STD_DEV:
2667 std_error = '+-%.02f' % current_data['value']['std_err']
2668 mean = '%.02f' % current_data['value']['mean']
2669 _PrintTableRow(
2670 [20, 70, 12, 14, 13],
2671 [current_data['depot'], cl_link, std_error, mean, state_str])
2672 elif self.opts.bisect_mode == BISECT_MODE_RETURN_CODE:
2673 mean = '%d' % current_data['value']['mean']
2674 _PrintTableRow(
2675 [20, 70, 14, 13],
2676 [current_data['depot'], cl_link, mean, state_str])
2678 def _PrintTestedCommitsTable(
2679 self, revision_data_sorted, first_working_revision, last_broken_revision,
2680 confidence, final_step=True):
2681 print
2682 if final_step:
2683 print '===== TESTED COMMITS ====='
2684 else:
2685 print '===== PARTIAL RESULTS ====='
2686 self._PrintTestedCommitsHeader()
2687 state = 0
2688 for current_id, current_data in revision_data_sorted:
2689 if current_data['value']:
2690 if (current_id == last_broken_revision or
2691 current_id == first_working_revision):
2692 # If confidence is too low, don't add this empty line since it's
2693 # used to put focus on a suspected CL.
2694 if confidence and final_step:
2695 print
2696 state += 1
2697 if state == 2 and not final_step:
2698 # Just want a separation between the "bad" and "good" CLs.
2699 print
2701 state_str = 'Bad'
2702 if state == 1 and final_step:
2703 state_str = 'Suspected CL'
2704 elif state == 2:
2705 state_str = 'Good'
2707 # If confidence is too low, don't bother outputting good/bad.
2708 if not confidence:
2709 state_str = ''
2710 state_str = state_str.center(13, ' ')
2712 cl_link = self._GetViewVCLinkFromDepotAndHash(current_id,
2713 current_data['depot'])
2714 if not cl_link:
2715 cl_link = current_id
2716 self._PrintTestedCommitsEntry(current_data, cl_link, state_str)
2718 def _PrintReproSteps(self):
2719 """Prints out a section of the results explaining how to run the test.
2721 This message includes the command used to run the test.
2723 command = '$ ' + self.opts.command
2724 if bisect_utils.IsTelemetryCommand(self.opts.command):
2725 command += ('\nAlso consider passing --profiler=list to see available '
2726 'profilers.')
2727 print REPRO_STEPS_LOCAL % {'command': command}
2728 print REPRO_STEPS_TRYJOB % {'command': command}
2730 def _PrintOtherRegressions(self, other_regressions, revision_data):
2731 """Prints a section of the results about other potential regressions."""
2732 print
2733 print 'Other regressions may have occurred:'
2734 print ' %8s %70s %10s' % ('Depot'.center(8, ' '),
2735 'Range'.center(70, ' '), 'Confidence'.center(10, ' '))
2736 for regression in other_regressions:
2737 current_id, previous_id, confidence = regression
2738 current_data = revision_data[current_id]
2739 previous_data = revision_data[previous_id]
2741 current_link = self._GetViewVCLinkFromDepotAndHash(current_id,
2742 current_data['depot'])
2743 previous_link = self._GetViewVCLinkFromDepotAndHash(previous_id,
2744 previous_data['depot'])
2746 # If we can't map it to a viewable URL, at least show the original hash.
2747 if not current_link:
2748 current_link = current_id
2749 if not previous_link:
2750 previous_link = previous_id
2752 print ' %8s %70s %s' % (
2753 current_data['depot'], current_link,
2754 ('%d%%' % confidence).center(10, ' '))
2755 print ' %8s %70s' % (
2756 previous_data['depot'], previous_link)
2757 print
2759 def _GetResultsDict(self, revision_data, revision_data_sorted):
2760 # Find range where it possibly broke.
2761 first_working_revision = None
2762 first_working_revision_index = -1
2763 last_broken_revision = None
2764 last_broken_revision_index = -1
2766 culprit_revisions = []
2767 other_regressions = []
2768 regression_size = 0.0
2769 regression_std_err = 0.0
2770 confidence = 0.0
2772 for i in xrange(len(revision_data_sorted)):
2773 k, v = revision_data_sorted[i]
2774 if v['passed'] == 1:
2775 if not first_working_revision:
2776 first_working_revision = k
2777 first_working_revision_index = i
2779 if not v['passed']:
2780 last_broken_revision = k
2781 last_broken_revision_index = i
2783 if last_broken_revision is not None and first_working_revision is not None:
2784 broken_means = []
2785 for i in xrange(0, last_broken_revision_index + 1):
2786 if revision_data_sorted[i][1]['value']:
2787 broken_means.append(revision_data_sorted[i][1]['value']['values'])
2789 working_means = []
2790 for i in xrange(first_working_revision_index, len(revision_data_sorted)):
2791 if revision_data_sorted[i][1]['value']:
2792 working_means.append(revision_data_sorted[i][1]['value']['values'])
2794 # Flatten the lists to calculate the mean of all values.
2795 working_mean = sum(working_means, [])
2796 broken_mean = sum(broken_means, [])
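# Editor's note -- the sum(..., []) idiom above flattens one level of nesting,
# e.g. sum([[10.0, 11.0], [12.0]], []) == [10.0, 11.0, 12.0]; each per-revision
# 'values' list is concatenated into a single list of samples.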
2798 # Calculate the approximate size of the regression
2799 mean_of_bad_runs = math_utils.Mean(broken_mean)
2800 mean_of_good_runs = math_utils.Mean(working_mean)
2802 regression_size = 100 * math_utils.RelativeChange(mean_of_good_runs,
2803 mean_of_bad_runs)
2804 if math.isnan(regression_size):
2805 regression_size = 'zero-to-nonzero'
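# Editor's note -- worked example with hypothetical numbers, assuming
# math_utils.RelativeChange(good, bad) returns (bad - good) / good: with
# mean_of_good_runs = 100.0 and mean_of_bad_runs = 110.0 the relative change is
# 0.10, so regression_size is reported as 10 (percent). The isnan() check above
# maps the degenerate case to the 'zero-to-nonzero' label instead.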
2807 regression_std_err = math.fabs(math_utils.PooledStandardError(
2808 [working_mean, broken_mean]) /
2809 max(0.0001, min(mean_of_good_runs, mean_of_bad_runs))) * 100.0
2811 # Give a "confidence" in the bisect. At the moment we use how distinct the
2812 # values are before and after the last broken revision, and how noisy the
2813 # overall graph is.
2814 confidence = ConfidenceScore(working_means, broken_means)
2816 culprit_revisions = []
2818 cwd = os.getcwd()
2819 self.ChangeToDepotWorkingDirectory(
2820 revision_data[last_broken_revision]['depot'])
2822 if revision_data[last_broken_revision]['depot'] == 'cros':
2823 # Want to get a list of all the commits and what depots they belong
2824 # to so that we can grab info about each.
2825 cmd = ['repo', 'forall', '-c',
2826 'pwd ; git log --pretty=oneline --before=%d --after=%d' % (
2827 last_broken_revision, first_working_revision + 1)]
2828 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(cmd)
2830 changes = []
2831 assert not return_code, ('An error occurred while running '
2832 '"%s"' % ' '.join(cmd))
2833 last_depot = None
2834 cwd = os.getcwd()
2835 for l in output.split('\n'):
2836 if l:
2837 # Output will be in form:
2838 # /path_to_depot
2839 # /path_to_other_depot
2840 # <SHA1>
2841 # /path_again
2842 # <SHA1>
2843 # etc.
2844 if l[0] == '/':
2845 last_depot = l
2846 else:
2847 contents = l.split(' ')
2848 if len(contents) > 1:
2849 changes.append([last_depot, contents[0]])
2850 for c in changes:
2851 os.chdir(c[0])
2852 info = self.source_control.QueryRevisionInfo(c[1])
2853 culprit_revisions.append((c[1], info, None))
2854 else:
2855 for i in xrange(last_broken_revision_index, len(revision_data_sorted)):
2856 k, v = revision_data_sorted[i]
2857 if k == first_working_revision:
2858 break
2859 self.ChangeToDepotWorkingDirectory(v['depot'])
2860 info = self.source_control.QueryRevisionInfo(k)
2861 culprit_revisions.append((k, info, v['depot']))
2862 os.chdir(cwd)
2864 # Check for any other possible regression ranges.
2865 other_regressions = _FindOtherRegressions(
2866 revision_data_sorted, mean_of_bad_runs > mean_of_good_runs)
2868 return {
2869 'first_working_revision': first_working_revision,
2870 'last_broken_revision': last_broken_revision,
2871 'culprit_revisions': culprit_revisions,
2872 'other_regressions': other_regressions,
2873 'regression_size': regression_size,
2874 'regression_std_err': regression_std_err,
2875 'confidence': confidence,
2876 }
2878 def _CheckForWarnings(self, results_dict):
2879 if len(results_dict['culprit_revisions']) > 1:
2880 self.warnings.append('Due to build errors, regression range could '
2881 'not be narrowed down to a single commit.')
2882 if self.opts.repeat_test_count == 1:
2883 self.warnings.append('Tests were only set to run once. This may '
2884 'be insufficient to get meaningful results.')
2885 if 0 < results_dict['confidence'] < HIGH_CONFIDENCE:
2886 self.warnings.append('Confidence is not high. Try bisecting again '
2887 'with increased repeat_count, larger range, or '
2888 'on another metric.')
2889 if not results_dict['confidence']:
2890 self.warnings.append('Confidence score is 0%. Try bisecting again on '
2891 'another platform or another metric.')
2893 def FormatAndPrintResults(self, bisect_results):
2894 """Prints the results from a bisection run in a readable format.
2896 Args:
2897 bisect_results: The results from a bisection test run.
2899 revision_data = bisect_results['revision_data']
2900 revision_data_sorted = sorted(revision_data.iteritems(),
2901 key = lambda x: x[1]['sort'])
2902 results_dict = self._GetResultsDict(revision_data, revision_data_sorted)
2904 self._CheckForWarnings(results_dict)
2906 if self.opts.output_buildbot_annotations:
2907 bisect_utils.OutputAnnotationStepStart('Build Status Per Revision')
2909 print
2910 print 'Full results of bisection:'
2911 for current_id, current_data in revision_data_sorted:
2912 build_status = current_data['passed']
2914 if isinstance(build_status, bool):
2915 if build_status:
2916 build_status = 'Good'
2917 else:
2918 build_status = 'Bad'
2920 print ' %20s %40s %s' % (current_data['depot'],
2921 current_id, build_status)
2922 print
2924 if self.opts.output_buildbot_annotations:
2925 bisect_utils.OutputAnnotationStepClosed()
2926 # The perf dashboard scrapes the "results" step in order to comment on
2927 # bugs. If you change this, please update the perf dashboard as well.
2928 bisect_utils.OutputAnnotationStepStart('Results')
2930 self._PrintBanner(results_dict)
2931 self._PrintWarnings()
2933 if results_dict['culprit_revisions'] and results_dict['confidence']:
2934 for culprit in results_dict['culprit_revisions']:
2935 cl, info, depot = culprit
2936 self._PrintRevisionInfo(cl, info, depot)
2937 if results_dict['other_regressions']:
2938 self._PrintOtherRegressions(results_dict['other_regressions'],
2939 revision_data)
2940 self._PrintTestedCommitsTable(revision_data_sorted,
2941 results_dict['first_working_revision'],
2942 results_dict['last_broken_revision'],
2943 results_dict['confidence'])
2944 _PrintStepTime(revision_data_sorted)
2945 self._PrintReproSteps()
2946 _PrintThankYou()
2947 if self.opts.output_buildbot_annotations:
2948 bisect_utils.OutputAnnotationStepClosed()
2950 def _PrintBanner(self, results_dict):
2951 if self._IsBisectModeReturnCode():
2952 metrics = 'N/A'
2953 change = 'Yes'
2954 else:
2955 metrics = '/'.join(self.opts.metric)
2956 change = '%.02f%% (+/-%.02f%%)' % (
2957 results_dict['regression_size'], results_dict['regression_std_err'])
2959 if results_dict['culprit_revisions'] and results_dict['confidence']:
2960 status = self._ConfidenceLevelStatus(results_dict)
2961 else:
2962 status = 'Failure, could not reproduce.'
2963 change = 'Bisect could not reproduce a change.'
2965 print RESULTS_BANNER % {
2966 'status': status,
2967 'command': self.opts.command,
2968 'metrics': metrics,
2969 'change': change,
2970 'confidence': results_dict['confidence'],
2971 }
2973 def _PrintWarnings(self):
2974 """Prints a list of warning strings if there are any."""
2975 if not self.warnings:
2976 return
2977 print
2978 print 'WARNINGS:'
2979 for w in set(self.warnings):
2980 print ' ! %s' % w
2983 def _IsPlatformSupported():
2984 """Checks that this platform and build system are supported.
2989 Returns:
2990 True if the platform and build system are supported.
2992 # Haven't tested the script out on any other platforms yet.
2993 supported = ['posix', 'nt']
2994 return os.name in supported
2997 def RmTreeAndMkDir(path_to_dir, skip_makedir=False):
2998 """Removes the directory tree specified, and then creates an empty
2999 directory in the same location (if not specified to skip).
3001 Args:
3002 path_to_dir: Path to the directory tree.
3003 skip_makedir: Whether to skip creating empty directory, default is False.
3005 Returns:
3006 True if successful, False if an error occurred.
3008 try:
3009 if os.path.exists(path_to_dir):
3010 shutil.rmtree(path_to_dir)
3011 except OSError, e:
3012 if e.errno != errno.ENOENT:
3013 return False
3015 if not skip_makedir:
3016 return MaybeMakeDirectory(path_to_dir)
3018 return True
3021 def RemoveBuildFiles(build_type):
3022 """Removes build files from previous runs."""
3023 if RmTreeAndMkDir(os.path.join('out', build_type)):
3024 if RmTreeAndMkDir(os.path.join('build', build_type)):
3025 return True
3026 return False
3029 class BisectOptions(object):
3030 """Options to be used when running bisection."""
3031 def __init__(self):
3032 super(BisectOptions, self).__init__()
3034 self.target_platform = 'chromium'
3035 self.build_preference = None
3036 self.good_revision = None
3037 self.bad_revision = None
3038 self.use_goma = None
3039 self.goma_dir = None
3040 self.cros_board = None
3041 self.cros_remote_ip = None
3042 self.repeat_test_count = 20
3043 self.truncate_percent = 25
3044 self.max_time_minutes = 20
3045 self.metric = None
3046 self.command = None
3047 self.output_buildbot_annotations = None
3048 self.no_custom_deps = False
3049 self.working_directory = None
3050 self.extra_src = None
3051 self.debug_ignore_build = None
3052 self.debug_ignore_sync = None
3053 self.debug_ignore_perf_test = None
3054 self.gs_bucket = None
3055 self.target_arch = 'ia32'
3056 self.target_build_type = 'Release'
3057 self.builder_host = None
3058 self.builder_port = None
3059 self.bisect_mode = BISECT_MODE_MEAN
3061 @staticmethod
3062 def _CreateCommandLineParser():
3063 """Creates a parser with bisect options.
3065 Returns:
3066 An instance of optparse.OptionParser.
3068 usage = ('%prog [options] [-- chromium-options]\n'
3069 'Perform binary search on revision history to find a minimal '
3070 'range of revisions where a performance metric regressed.\n')
3072 parser = optparse.OptionParser(usage=usage)
3074 group = optparse.OptionGroup(parser, 'Bisect options')
3075 group.add_option('-c', '--command',
3076 type='str',
3077 help='A command to execute your performance test at' +
3078 ' each point in the bisection.')
3079 group.add_option('-b', '--bad_revision',
3080 type='str',
3081 help='A bad revision to start bisection. ' +
3082 'Must be later than good revision. May be either a git' +
3083 ' or svn revision.')
3084 group.add_option('-g', '--good_revision',
3085 type='str',
3086 help='A revision to start bisection where performance' +
3087 ' test is known to pass. Must be earlier than the ' +
3088 'bad revision. May be either a git or svn revision.')
3089 group.add_option('-m', '--metric',
3090 type='str',
3091 help='The desired metric to bisect on. For example ' +
3092 '"vm_rss_final_b/vm_rss_f_b"')
3093 group.add_option('-r', '--repeat_test_count',
3094 type='int',
3095 default=20,
3096 help='The number of times to repeat the performance '
3097 'test. Values will be clamped to range [1, 100]. '
3098 'Default value is 20.')
3099 group.add_option('--max_time_minutes',
3100 type='int',
3101 default=20,
3102 help='The maximum time (in minutes) to take running the '
3103 'performance tests. The script will run the performance '
3104 'tests according to --repeat_test_count, so long as it '
3105 'doesn\'t exceed --max_time_minutes. Values will be '
3106 'clamped to range [1, 60]. '
3107 'Default value is 20.')
3108 group.add_option('-t', '--truncate_percent',
3109 type='int',
3110 default=25,
3111 help='The highest/lowest % are discarded to form a '
3112 'truncated mean. Values will be clamped to range [0, '
3113 '25]. Default value is 25 (highest/lowest 25% will be '
3114 'discarded).')
3115 group.add_option('--bisect_mode',
3116 type='choice',
3117 choices=[BISECT_MODE_MEAN, BISECT_MODE_STD_DEV,
3118 BISECT_MODE_RETURN_CODE],
3119 default=BISECT_MODE_MEAN,
3120 help='The bisect mode. Choices are to bisect on the '
3121 'difference in mean, std_dev, or return_code.')
3122 parser.add_option_group(group)
3124 group = optparse.OptionGroup(parser, 'Build options')
3125 group.add_option('-w', '--working_directory',
3126 type='str',
3127 help='Path to the working directory where the script '
3128 'will do an initial checkout of the chromium depot. The '
3129 'files will be placed in a subdirectory "bisect" under '
3130 'working_directory and that will be used to perform the '
3131 'bisection. This parameter is optional, if it is not '
3132 'supplied, the script will work from the current depot.')
3133 group.add_option('--build_preference',
3134 type='choice',
3135 choices=['msvs', 'ninja', 'make'],
3136 help='The preferred build system to use. On linux/mac '
3137 'the options are make/ninja. On Windows, the options '
3138 'are msvs/ninja.')
3139 group.add_option('--target_platform',
3140 type='choice',
3141 choices=['chromium', 'cros', 'android', 'android-chrome'],
3142 default='chromium',
3143 help='The target platform. Choices are "chromium" '
3144 '(current platform), "cros", "android", or "android-chrome". If you '
3145 'specify something other than "chromium", you must be '
3146 'properly set up to build that platform.')
3147 group.add_option('--no_custom_deps',
3148 dest='no_custom_deps',
3149 action='store_true',
3150 default=False,
3151 help='Run the script without the default custom_deps.')
3152 group.add_option('--extra_src',
3153 type='str',
3154 help='Path to a script which can be used to modify '
3155 'the bisect script\'s behavior.')
3156 group.add_option('--cros_board',
3157 type='str',
3158 help='The cros board type to build.')
3159 group.add_option('--cros_remote_ip',
3160 type='str',
3161 help='The remote machine to image to.')
3162 group.add_option('--use_goma',
3163 action='store_true',
3164 help='Enable goma and add extra build threads to take '
3165 'advantage of it.')
3166 group.add_option('--goma_dir',
3167 help='Path to goma tools (or system default if not '
3168 'specified).')
3169 group.add_option('--output_buildbot_annotations',
3170 action='store_true',
3171 help='Add extra annotation output for buildbot.')
3172 group.add_option('--gs_bucket',
3173 default='',
3174 dest='gs_bucket',
3175 type='str',
3176 help=('Name of the Google Storage bucket used to upload '
3177 'or download builds, e.g. chrome-perf.'))
3178 group.add_option('--target_arch',
3179 type='choice',
3180 choices=['ia32', 'x64', 'arm'],
3181 default='ia32',
3182 dest='target_arch',
3183 help=('The target build architecture. Choices are "ia32" '
3184 '(default), "x64" or "arm".'))
3185 group.add_option('--target_build_type',
3186 type='choice',
3187 choices=['Release', 'Debug'],
3188 default='Release',
3189 help='The target build type. Choices are "Release" '
3190 '(default) or "Debug".')
3191 group.add_option('--builder_host',
3192 dest='builder_host',
3193 type='str',
3194 help=('Host address of the server used to produce builds '
3195 'by posting try job requests.'))
3196 group.add_option('--builder_port',
3197 dest='builder_port',
3198 type='int',
3199 help=('HTTP port of the server used to produce builds by '
3200 'posting try job requests.'))
3201 parser.add_option_group(group)
3203 group = optparse.OptionGroup(parser, 'Debug options')
3204 group.add_option('--debug_ignore_build',
3205 action='store_true',
3206 help='DEBUG: Don\'t perform builds.')
3207 group.add_option('--debug_ignore_sync',
3208 action='store_true',
3209 help='DEBUG: Don\'t perform syncs.')
3210 group.add_option('--debug_ignore_perf_test',
3211 action='store_true',
3212 help='DEBUG: Don\'t perform performance tests.')
3213 parser.add_option_group(group)
3214 return parser
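# A minimal sketch of driving the parser above directly (e.g. from a unit
# test). The argv values are hypothetical; only flags defined in
# _CreateCommandLineParser and the documented default of --repeat_test_count
# are assumed:
#
#   parser = BisectOptions()._CreateCommandLineParser()
#   opts, _ = parser.parse_args([
#       '-c', './run_perf_test', '-g', '100000', '-b', '100100',
#       '-m', 'chart/trace'])
#   assert opts.repeat_test_count == 20  # documented default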
3216 def ParseCommandLine(self):
3217 """Parses the command line for bisect options."""
3218 parser = self._CreateCommandLineParser()
3219 opts, _ = parser.parse_args()
3221 try:
3222 if not opts.command:
3223 raise RuntimeError('missing required parameter: --command')
3225 if not opts.good_revision:
3226 raise RuntimeError('missing required parameter: --good_revision')
3228 if not opts.bad_revision:
3229 raise RuntimeError('missing required parameter: --bad_revision')
3231 if not opts.metric and opts.bisect_mode != BISECT_MODE_RETURN_CODE:
3232 raise RuntimeError('missing required parameter: --metric')
3234 if opts.gs_bucket:
3235 if not cloud_storage.List(opts.gs_bucket):
3236 raise RuntimeError('Invalid Google Storage: gs://%s' % opts.gs_bucket)
3237 if not opts.builder_host:
3238 raise RuntimeError('Must specify try server host name using '
3239 '--builder_host when gs_bucket is used.')
3240 if not opts.builder_port:
3241 raise RuntimeError('Must specify try server port number using '
3242 '--builder_port when gs_bucket is used.')
3243 if opts.target_platform == 'cros':
3244 # Run sudo up front to make sure credentials are cached for later.
3245 print 'Sudo is required to build cros:'
3246 print
3247 bisect_utils.RunProcess(['sudo', 'true'])
3249 if not opts.cros_board:
3250 raise RuntimeError('missing required parameter: --cros_board')
3252 if not opts.cros_remote_ip:
3253 raise RuntimeError('missing required parameter: --cros_remote_ip')
3255 if not opts.working_directory:
3256 raise RuntimeError('missing required parameter: --working_directory')
3258 metric_values = opts.metric.split('/')
3259 if (len(metric_values) != 2 and
3260 opts.bisect_mode != BISECT_MODE_RETURN_CODE):
3261 raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)
3263 opts.metric = metric_values
3264 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3265 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3266 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3267 opts.truncate_percent = opts.truncate_percent / 100.0
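# Worked example of the clamping and conversion above (flag values are
# hypothetical):
#   --repeat_test_count 500 -> clamped to 100
#   --max_time_minutes 0    -> clamped to 1
#   --truncate_percent 40   -> clamped to 25, then stored as the fraction 0.25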
3269 for k, v in opts.__dict__.iteritems():
3270 assert hasattr(self, k), 'Invalid %s attribute in BisectOptions.' % k
3271 setattr(self, k, v)
3272 except RuntimeError, e:
3273 output_string = StringIO.StringIO()
3274 parser.print_help(file=output_string)
3275 error_message = '%s\n\n%s' % (e.message, output_string.getvalue())
3276 output_string.close()
3277 raise RuntimeError(error_message)
3279 @staticmethod
3280 def FromDict(values):
3281 """Creates an instance of BisectOptions from a dictionary.
3283 Args:
3284 values: a dict containing options to set.
3286 Returns:
3287 An instance of BisectOptions.
3288 """
3289 opts = BisectOptions()
3290 for k, v in values.iteritems():
3291 assert hasattr(opts, k), 'Invalid %s attribute in BisectOptions.' % k
3292 setattr(opts, k, v)
3294 if opts.metric:
3295 metric_values = opts.metric.split('/')
3296 if len(metric_values) != 2:
3297 raise RuntimeError('Invalid metric specified: [%s]' % opts.metric)
3298 opts.metric = metric_values
3300 opts.repeat_test_count = min(max(opts.repeat_test_count, 1), 100)
3301 opts.max_time_minutes = min(max(opts.max_time_minutes, 1), 60)
3302 opts.truncate_percent = min(max(opts.truncate_percent, 0), 25)
3303 opts.truncate_percent = opts.truncate_percent / 100.0
3305 return opts
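# A minimal sketch of building options via FromDict, e.g. when they come from
# a config dict rather than the command line. The values are hypothetical, and
# it is assumed that BisectOptions.__init__ (not shown here) supplies defaults
# for the attributes the dict omits:
#
#   opts = BisectOptions.FromDict({
#       'command': './run_perf_test',
#       'good_revision': '100000',
#       'bad_revision': '100100',
#       'metric': 'chart/trace',
#   })
#   # opts.metric is now ['chart', 'trace']; the numeric options are clamped
#   # to the same ranges as in ParseCommandLine.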
3308 def main():
3310 try:
3311 opts = BisectOptions()
3312 opts.ParseCommandLine()
3314 if opts.extra_src:
3315 extra_src = bisect_utils.LoadExtraSrc(opts.extra_src)
3316 if not extra_src:
3317 raise RuntimeError('Invalid or missing --extra_src.')
3318 _AddAdditionalDepotInfo(extra_src.GetAdditionalDepotInfo())
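# A minimal sketch of an --extra_src module, assuming (as the calls above
# suggest) that bisect_utils.LoadExtraSrc imports the given file and that it
# must expose at least GetAdditionalDepotInfo(), returning extra depot entries
# that get merged into DEPOT_DEPS_NAME. Every name and value below is
# hypothetical:
#
#   def GetAdditionalDepotInfo():
#     return {
#         'my_depot': {
#             'src': 'src/third_party/my_depot',
#             'recurse': True,
#             'depends': None,
#             'from': ['chromium'],
#         },
#     }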
3320 if opts.working_directory:
3321 custom_deps = bisect_utils.DEFAULT_GCLIENT_CUSTOM_DEPS
3322 if opts.no_custom_deps:
3323 custom_deps = None
3324 bisect_utils.CreateBisectDirectoryAndSetupDepot(opts, custom_deps)
3326 os.chdir(os.path.join(os.getcwd(), 'src'))
3328 if not RemoveBuildFiles(opts.target_build_type):
3329 raise RuntimeError('Something went wrong removing the build files.')
3331 if not _IsPlatformSupported():
3332 raise RuntimeError('Sorry, this platform isn\'t supported yet.')
3334 # Check what source control method is being used, and create a
3335 # SourceControl object if possible.
3336 source_control = source_control_module.DetermineAndCreateSourceControl(opts)
3338 if not source_control:
3339 raise RuntimeError(
3340 'Sorry, only the git workflow is supported at the moment.')
3342 # gclient sync seems to fail if you're not on the master branch.
3343 if (not source_control.IsInProperBranch() and
3344 not opts.debug_ignore_sync and
3345 not opts.working_directory):
3346 raise RuntimeError('You must be on the master branch to run bisection.')
3347 bisect_test = BisectPerformanceMetrics(source_control, opts)
3348 try:
3349 bisect_results = bisect_test.Run(opts.command,
3350 opts.bad_revision,
3351 opts.good_revision,
3352 opts.metric)
3353 if bisect_results['error']:
3354 raise RuntimeError(bisect_results['error'])
3355 bisect_test.FormatAndPrintResults(bisect_results)
3356 return 0
3357 finally:
3358 bisect_test.PerformCleanup()
3359 except RuntimeError, e:
3360 if opts.output_buildbot_annotations:
3361 # The perf dashboard scrapes the "results" step in order to comment on
3362 # bugs. If you change this, please update the perf dashboard as well.
3363 bisect_utils.OutputAnnotationStepStart('Results')
3364 print 'Error: %s' % e.message
3365 if opts.output_buildbot_annotations:
3366 bisect_utils.OutputAnnotationStepClosed()
3367 return 1
3370 if __name__ == '__main__':
3371 sys.exit(main())