#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Snapshot Build Bisect Tool

This script bisects a snapshot archive using binary search. It starts at
a bad revision (it will try to guess HEAD) and asks for a last known-good
revision. It will then binary search across this revision range by downloading,
unzipping, and opening Chromium for you. After testing the specific revision,
it will ask you whether it is good or bad before continuing the search.
"""

# The root URL for storage.
BASE_URL = 'http://commondatastorage.googleapis.com/chromium-browser-snapshots'

# The root URL for official builds.
OFFICIAL_BASE_URL = 'http://master.chrome.corp.google.com/official_builds'

# Changelogs URL.
CHANGELOG_URL = 'http://build.chromium.org/f/chromium/' \
                'perf/dashboard/ui/changelog.html?' \
                'url=/trunk/src&range=%d%%3A%d'

# Official Changelogs URL.
OFFICIAL_CHANGELOG_URL = 'http://omahaproxy.appspot.com/' \
                         'changelog?old_version=%s&new_version=%s'

# DEPS file URL.
DEPS_FILE = 'http://src.chromium.org/viewvc/chrome/trunk/src/DEPS?revision=%d'

# Blink Changelogs URL.
BLINK_CHANGELOG_URL = 'http://build.chromium.org/f/chromium/' \
                      'perf/dashboard/ui/changelog_blink.html?' \
                      'url=/trunk&range=%d%%3A%d'

DONE_MESSAGE_GOOD_MIN = 'You are probably looking for a change made after %s ' \
                        '(known good), but no later than %s (first known bad).'
DONE_MESSAGE_GOOD_MAX = 'You are probably looking for a change made after %s ' \
                        '(known bad), but no later than %s (first known good).'

###############################################################################

import math
import optparse
import os
import pipes
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import urllib
from distutils.version import LooseVersion
from xml.etree import ElementTree
import zipfile


class PathContext(object):
  """A PathContext is used to carry the information used to construct URLs and
  paths when dealing with the storage server and archives."""
  def __init__(self, platform, good_revision, bad_revision, is_official):
    super(PathContext, self).__init__()
    # Store off the input parameters.
    self.platform = platform  # What's passed in to the '-a/--archive' option.
    self.good_revision = good_revision
    self.bad_revision = bad_revision
    self.is_official = is_official

    # The name of the ZIP file in a revision directory on the server.
    self.archive_name = None

    # Set some internal members:
    #   _listing_platform_dir = Directory that holds revisions. Ends with a '/'.
    #   _archive_extract_dir = Uncompressed directory in the archive_name file.
    #   _binary_name = The name of the executable to run.
    if self.platform in ('linux', 'linux64', 'linux-arm'):
      self._binary_name = 'chrome'
    elif self.platform == 'mac':
      self.archive_name = 'chrome-mac.zip'
      self._archive_extract_dir = 'chrome-mac'
    elif self.platform == 'win':
      self.archive_name = 'chrome-win32.zip'
      self._archive_extract_dir = 'chrome-win32'
      self._binary_name = 'chrome.exe'
    else:
      raise Exception('Invalid platform: %s' % self.platform)

    if is_official:
      if self.platform == 'linux':
        self._listing_platform_dir = 'lucid32bit/'
        self.archive_name = 'chrome-lucid32bit.zip'
        self._archive_extract_dir = 'chrome-lucid32bit'
      elif self.platform == 'linux64':
        self._listing_platform_dir = 'lucid64bit/'
        self.archive_name = 'chrome-lucid64bit.zip'
        self._archive_extract_dir = 'chrome-lucid64bit'
      elif self.platform == 'mac':
        self._listing_platform_dir = 'mac/'
        self._binary_name = 'Google Chrome.app/Contents/MacOS/Google Chrome'
      elif self.platform == 'win':
        self._listing_platform_dir = 'win/'
    else:
      if self.platform in ('linux', 'linux64', 'linux-arm'):
        self.archive_name = 'chrome-linux.zip'
        self._archive_extract_dir = 'chrome-linux'
        if self.platform == 'linux':
          self._listing_platform_dir = 'Linux/'
        elif self.platform == 'linux64':
          self._listing_platform_dir = 'Linux_x64/'
        elif self.platform == 'linux-arm':
          self._listing_platform_dir = 'Linux_ARM_Cross-Compile/'
      elif self.platform == 'mac':
        self._listing_platform_dir = 'Mac/'
        self._binary_name = 'Chromium.app/Contents/MacOS/Chromium'
      elif self.platform == 'win':
        self._listing_platform_dir = 'Win/'

  def GetListingURL(self, marker=None):
    """Returns the URL for a directory listing, with an optional marker."""
    marker_param = ''
    if marker:
      marker_param = '&marker=' + str(marker)
    return BASE_URL + '/?delimiter=/&prefix=' + self._listing_platform_dir + \
        marker_param

  def GetDownloadURL(self, revision):
    """Gets the download URL for a build archive of a specific revision."""
    if self.is_official:
      return "%s/%s/%s%s" % (
          OFFICIAL_BASE_URL, revision, self._listing_platform_dir,
          self.archive_name)
    else:
      return "%s/%s%s/%s" % (
          BASE_URL, self._listing_platform_dir, revision, self.archive_name)
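
  # For orientation, the two branches above produce URLs shaped roughly like
  # the following (the revision values are illustrative, not real builds):
  #   official: OFFICIAL_BASE_URL + '/35.0.1916.114/mac/chrome-mac.zip'
  #   snapshot: BASE_URL + '/Linux_x64/123456/chrome-linux.zip'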

  def GetLastChangeURL(self):
    """Returns a URL to the LAST_CHANGE file."""
    return BASE_URL + '/' + self._listing_platform_dir + 'LAST_CHANGE'

  def GetLaunchPath(self):
    """Returns a relative path (presumably from the archive extraction location)
    that is used to run the executable."""
    return os.path.join(self._archive_extract_dir, self._binary_name)

  def ParseDirectoryIndex(self):
    """Parses the Google Storage directory listing into a list of revision
    numbers."""

    def _FetchAndParse(url):
      """Fetches a URL and returns a 2-Tuple of ([revisions], next-marker). If
      next-marker is not None, then the listing is a partial listing and another
      fetch should be performed with next-marker being the marker= GET
      parameter."""
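      # The listing is assumed to be Google-Storage-style XML, roughly shaped
      # like the abridged example below; only the elements this function reads
      # are shown:
      #   <ListBucketResult xmlns="...">
      #     <Prefix>Linux_x64/</Prefix>
      #     <IsTruncated>true</IsTruncated>
      #     <NextMarker>Linux_x64/123456/</NextMarker>
      #     <CommonPrefixes><Prefix>Linux_x64/123400/</Prefix></CommonPrefixes>
      #   </ListBucketResult>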
      handle = urllib.urlopen(url)
      document = ElementTree.parse(handle)

      # All nodes in the tree are namespaced. Get the root's tag name to extract
      # the namespace. Etree does namespaces as |{namespace}tag|.
      root_tag = document.getroot().tag
      end_ns_pos = root_tag.find('}')
      if end_ns_pos == -1:
        raise Exception("Could not locate end namespace for directory index")
      namespace = root_tag[:end_ns_pos + 1]

      # Find the prefix (_listing_platform_dir) and whether or not the list is
      # truncated.
      prefix_len = len(document.find(namespace + 'Prefix').text)
      next_marker = None
      is_truncated = document.find(namespace + 'IsTruncated')
      if is_truncated is not None and is_truncated.text.lower() == 'true':
        next_marker = document.find(namespace + 'NextMarker').text

      # Get a list of all the revisions.
      all_prefixes = document.findall(namespace + 'CommonPrefixes/' +
                                      namespace + 'Prefix')
      # The <Prefix> nodes have content of the form of
      # |_listing_platform_dir/revision/|. Strip off the platform dir and the
      # trailing slash to just have a number.
      revisions = []
      for prefix in all_prefixes:
        revnum = prefix.text[prefix_len:-1]
        try:
          revnum = int(revnum)
          revisions.append(revnum)
        except ValueError:
          pass
      return (revisions, next_marker)

    # Fetch the first list of revisions.
    (revisions, next_marker) = _FetchAndParse(self.GetListingURL())

    # If the result list was truncated, refetch with the next marker. Do this
    # until an entire directory listing is done.
    while next_marker:
      next_url = self.GetListingURL(next_marker)
      (new_revisions, next_marker) = _FetchAndParse(next_url)
      revisions.extend(new_revisions)
    return revisions

  def GetRevList(self):
    """Gets the list of revision numbers between self.good_revision and
    self.bad_revision."""
    # Download the revlist and filter for just the range between good and bad.
    minrev = min(self.good_revision, self.bad_revision)
    maxrev = max(self.good_revision, self.bad_revision)
    revlist = map(int, self.ParseDirectoryIndex())
    revlist = [x for x in revlist if x >= int(minrev) and x <= int(maxrev)]
    revlist.sort()
    return revlist

  def GetOfficialBuildsList(self):
    """Gets the list of official build numbers between self.good_revision and
    self.bad_revision."""
    # Download the revlist and filter for just the range between good and bad.
    minrev = min(self.good_revision, self.bad_revision)
    maxrev = max(self.good_revision, self.bad_revision)
    handle = urllib.urlopen(OFFICIAL_BASE_URL)
    dirindex = handle.read()
    handle.close()
    build_numbers = re.findall(r'<a href="([0-9][0-9].*)/">', dirindex)
    final_list = []
    i = 0
    parsed_build_numbers = [LooseVersion(x) for x in build_numbers]
    for build_number in sorted(parsed_build_numbers):
      path = OFFICIAL_BASE_URL + '/' + str(build_number) + '/' + \
             self._listing_platform_dir + self.archive_name
      i = i + 1
      try:
        connection = urllib.urlopen(path)
        connection.close()
        if build_number > maxrev:
          break
        if build_number >= minrev:
          final_list.append(str(build_number))
      except IOError:
        # urllib.urlopen signals failures with IOError, so skip this build.
        pass
    return final_list


def UnzipFilenameToDir(filename, dir):
  """Unzip |filename| to directory |dir|."""
  cwd = os.getcwd()
  if not os.path.isabs(filename):
    filename = os.path.join(cwd, filename)
  zf = zipfile.ZipFile(filename)
  # Make base.
  if not os.path.isdir(dir):
    os.mkdir(dir)
  os.chdir(dir)
  # Extract files.
  for info in zf.infolist():
    name = info.filename
    if name.endswith('/'):  # dir
      if not os.path.isdir(name):
        os.makedirs(name)
    else:  # file
      dir = os.path.dirname(name)
      if not os.path.isdir(dir):
        os.makedirs(dir)
      out = open(name, 'wb')
      out.write(zf.read(name))
      out.close()
    # Set permissions. Permission info in external_attr is shifted 16 bits.
    os.chmod(name, info.external_attr >> 16L)
  os.chdir(cwd)


def FetchRevision(context, rev, filename, quit_event=None, progress_event=None):
  """Downloads and unzips revision |rev|.
  @param context A PathContext instance.
  @param rev The Chromium revision number/tag to download.
  @param filename The destination for the downloaded file.
  @param quit_event A threading.Event which will be set by the master thread to
                    indicate that the download should be aborted.
  @param progress_event A threading.Event which will be set by the master thread
                        to indicate that the progress of the download should be
                        displayed.
  """
  def ReportHook(blocknum, blocksize, totalsize):
    if quit_event and quit_event.isSet():
      raise RuntimeError("Aborting download of revision %s" % str(rev))
    if progress_event and progress_event.isSet():
      size = blocknum * blocksize
      if totalsize == -1:  # Total size not known.
        progress = "Received %d bytes" % size
      else:
        size = min(totalsize, size)
        progress = "Received %d of %d bytes, %.2f%%" % (
            size, totalsize, 100.0 * size / totalsize)
      # Send a \r to let all progress messages use just one line of output.
      sys.stdout.write("\r" + progress)
      sys.stdout.flush()

  download_url = context.GetDownloadURL(rev)
  try:
    urllib.urlretrieve(download_url, filename, ReportHook)
    if progress_event and progress_event.isSet():
      print
  except RuntimeError, e:
    pass


def RunRevision(context, revision, zipfile, profile, num_runs, command, args):
  """Given a zipped revision, unzip it and run the test."""
  print "Trying revision %s..." % str(revision)

  # Create a temp directory and unzip the revision into it.
  cwd = os.getcwd()
  tempdir = tempfile.mkdtemp(prefix='bisect_tmp')
  UnzipFilenameToDir(zipfile, tempdir)
  os.chdir(tempdir)

  # Run the build as many times as specified.
  testargs = ['--user-data-dir=%s' % profile] + args
  # The sandbox must be run as root on Official Chrome, so bypass it.
  if context.is_official and context.platform.startswith('linux'):
    testargs.append('--no-sandbox')

  runcommand = []
  for token in command.split():
    if token == "%a":
      runcommand.extend(testargs)
    else:
      runcommand.append(
          token.replace('%p', context.GetLaunchPath())
               .replace('%s', ' '.join(testargs)))
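
  # As an illustration (not from a real run): with the default command '%p %a'
  # on a Linux snapshot build, runcommand ends up roughly as
  # ['chrome-linux/chrome', '--user-data-dir=profile', <extra args>...].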

  for i in range(0, num_runs):
    subproc = subprocess.Popen(runcommand,
                               bufsize=-1,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    (stdout, stderr) = subproc.communicate()

  os.chdir(cwd)
  try:
    shutil.rmtree(tempdir, True)
  except Exception, e:
    pass

  return (subproc.returncode, stdout, stderr)


def AskIsGoodBuild(rev, official_builds, status, stdout, stderr):
  """Ask the user whether build |rev| is good or bad."""
  # Loop until we get a response that we can parse.
  while True:
    response = raw_input('Revision %s is '
                         '[(g)ood/(b)ad/(r)etry/(u)nknown/(q)uit]: ' %
                         str(rev))
    if response and response in ('g', 'b', 'r', 'u'):
      return response
    if response and response == 'q':
      raise SystemExit()


class DownloadJob(object):
  """DownloadJob represents a task to download a given Chromium revision."""
  def __init__(self, context, name, rev, zipfile):
    super(DownloadJob, self).__init__()
    # Store off the input parameters.
    self.context = context
    self.name = name
    self.rev = rev
    self.zipfile = zipfile
    self.quit_event = threading.Event()
    self.progress_event = threading.Event()

  def Start(self):
    """Starts the download."""
    fetchargs = (self.context,
                 self.rev,
                 self.zipfile,
                 self.quit_event,
                 self.progress_event)
    self.thread = threading.Thread(target=FetchRevision,
                                   name=self.name,
                                   args=fetchargs)
    self.thread.start()

  def Stop(self):
    """Stops the download which must have been started previously."""
    self.quit_event.set()
    self.thread.join()
    os.unlink(self.zipfile)

  def WaitFor(self):
    """Prints a message and waits for the download to complete. The download
    must have been started previously."""
    print "Downloading revision %s..." % str(self.rev)
    self.progress_event.set()  # Display progress of download.
    self.thread.join()


def Bisect(platform,
           official_builds,
           good_rev=0,
           bad_rev=0,
           num_runs=1,
           command="%p %a",
           try_args=(),
           profile=None,
           evaluate=AskIsGoodBuild):
  """Given known good and known bad revisions, run a binary search on all
  archived revisions to determine the last known good revision.

  @param platform Which build to download/run ('mac', 'win', 'linux64', etc.).
  @param official_builds Specify build type (Chromium or Official build).
  @param good_rev Number/tag of the known good revision.
  @param bad_rev Number/tag of the known bad revision.
  @param num_runs Number of times to run each build for asking good/bad.
  @param command The command used to run the build; %p and %a are replaced by
                 the build's launch path and the extra test arguments.
  @param try_args A tuple of arguments to pass to the test application.
  @param profile The name of the user profile to run with.
  @param evaluate A function which returns 'g' if the argument build is good,
                  'b' if it's bad or 'u' if unknown.

  Threading is used to fetch Chromium revisions in the background, speeding up
  the user's experience. For example, suppose the bounds of the search are
  good_rev=0, bad_rev=100. The first revision to be checked is 50. Depending on
  whether revision 50 is good or bad, the next revision to check will be either
  25 or 75. So, while revision 50 is being checked, the script will download
  revisions 25 and 75 in the background. Once the good/bad verdict on rev 50 is
  known:

  - If rev 50 is good, the download of rev 25 is cancelled, and the next test
    is run on rev 75.

  - If rev 50 is bad, the download of rev 75 is cancelled, and the next test
    is run on rev 25.
  """

  if not profile:
    profile = 'profile'

  context = PathContext(platform, good_rev, bad_rev, official_builds)
  cwd = os.getcwd()

  print "Downloading list of known revisions..."
  _GetDownloadPath = lambda rev: os.path.join(cwd,
      '%s-%s' % (str(rev), context.archive_name))
  if official_builds:
    revlist = context.GetOfficialBuildsList()
  else:
    revlist = context.GetRevList()

  # Get a list of revisions to bisect across.
  if len(revlist) < 2:  # Don't have enough builds to bisect.
    msg = 'We don\'t have enough builds to bisect. revlist: %s' % revlist
    raise RuntimeError(msg)

  # Figure out our bookends and first pivot point; fetch the pivot revision.
  minrev = 0
  maxrev = len(revlist) - 1
  pivot = maxrev / 2
  rev = revlist[pivot]
  zipfile = _GetDownloadPath(rev)
  fetch = DownloadJob(context, 'initial_fetch', rev, zipfile)
  fetch.Start()
  fetch.WaitFor()

  # Binary search time!
  while fetch and fetch.zipfile and maxrev - minrev > 1:
    if bad_rev < good_rev:
      min_str, max_str = "bad", "good"
    else:
      min_str, max_str = "good", "bad"
    print 'Bisecting range [%s (%s), %s (%s)].' % (revlist[minrev], min_str,
                                                   revlist[maxrev], max_str)

    # Pre-fetch next two possible pivots
    # - down_pivot is the next revision to check if the current revision turns
    #   out to be bad.
    # - up_pivot is the next revision to check if the current revision turns
    #   out to be good.
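    # Worked example of the pivot arithmetic below (illustrative indices,
    # echoing the docstring above): with minrev=0, maxrev=100 and pivot=50,
    # down_pivot = (50 - 0) / 2 + 0 = 25 and up_pivot = (100 - 50) / 2 + 50 = 75.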
    down_pivot = int((pivot - minrev) / 2) + minrev
    down_fetch = None
    if down_pivot != pivot and down_pivot != minrev:
      down_rev = revlist[down_pivot]
      down_fetch = DownloadJob(context, 'down_fetch', down_rev,
                               _GetDownloadPath(down_rev))
      down_fetch.Start()

    up_pivot = int((maxrev - pivot) / 2) + pivot
    up_fetch = None
    if up_pivot != pivot and up_pivot != maxrev:
      up_rev = revlist[up_pivot]
      up_fetch = DownloadJob(context, 'up_fetch', up_rev,
                             _GetDownloadPath(up_rev))
      up_fetch.Start()

    # Run test on the pivot revision.
    status = None
    stdout = None
    stderr = None
    try:
      (status, stdout, stderr) = RunRevision(context,
                                             rev,
                                             fetch.zipfile,
                                             profile,
                                             num_runs,
                                             command,
                                             try_args)
    except Exception, e:
      print >> sys.stderr, e

    # Call the evaluate function to see if the current revision is good or bad.
    # On that basis, kill one of the background downloads and complete the
    # other, as described in the comments above.
    try:
      answer = evaluate(rev, official_builds, status, stdout, stderr)
      if answer == 'g' and good_rev < bad_rev or \
         answer == 'b' and bad_rev < good_rev:
        fetch.Stop()
        minrev = pivot
        if down_fetch:
          down_fetch.Stop()  # Kill the download of the older revision.
          fetch = None
        if up_fetch:
          up_fetch.WaitFor()
          pivot = up_pivot
          fetch = up_fetch
      elif answer == 'b' and good_rev < bad_rev or \
           answer == 'g' and bad_rev < good_rev:
        fetch.Stop()
        maxrev = pivot
        if up_fetch:
          up_fetch.Stop()  # Kill the download of the newer revision.
          fetch = None
        if down_fetch:
          down_fetch.WaitFor()
          pivot = down_pivot
          fetch = down_fetch
      elif answer == 'r':
        pass  # Retry requires no changes.
      elif answer == 'u':
        # Nuke the revision from the revlist and choose a new pivot.
        fetch.Stop()
        revlist.pop(pivot)
        maxrev -= 1  # Assumes maxrev >= pivot.

        if maxrev - minrev > 1:
          # Alternate between using down_pivot or up_pivot for the new pivot
          # point, without affecting the range. Do this instead of setting the
          # pivot to the midpoint of the new range because adjacent revisions
          # are likely affected by the same issue that caused the (u)nknown
          # response.
          if up_fetch and down_fetch:
            fetch = [up_fetch, down_fetch][len(revlist) % 2]
          elif up_fetch:
            fetch = up_fetch
          else:
            fetch = down_fetch
          fetch.WaitFor()
          if fetch == up_fetch:
            pivot = up_pivot - 1  # Subtracts 1 because revlist was resized.
          else:
            pivot = down_pivot
          zipfile = fetch.zipfile

        if down_fetch and fetch != down_fetch:
          down_fetch.Stop()
        if up_fetch and fetch != up_fetch:
          up_fetch.Stop()
      else:
        assert False, "Unexpected return value from evaluate(): " + answer
    except SystemExit:
      print "Cleaning up..."
      for f in [_GetDownloadPath(revlist[down_pivot]),
                _GetDownloadPath(revlist[up_pivot])]:
        try:
          os.unlink(f)
        except OSError:
          pass
      sys.exit(0)

    rev = revlist[pivot]

  return (revlist[minrev], revlist[maxrev])


def GetBlinkRevisionForChromiumRevision(rev):
  """Returns the blink revision that was in chromium's DEPS file at
  chromium revision |rev|."""
  # . doesn't match newlines without re.DOTALL, so this is safe.
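  # For illustration only: the pattern below is meant to match a DEPS entry of
  # roughly this shape (the number is made up): "webkit_revision": "165432",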
  blink_re = re.compile(r'webkit_revision.:\D*(\d+)')
  url = urllib.urlopen(DEPS_FILE % rev)
  m = blink_re.search(url.read())
  url.close()
  if m:
    return int(m.group(1))
  else:
    raise Exception('Could not get blink revision for cr rev %d' % rev)


def GetChromiumRevision(url):
  """Returns the chromium revision read from given URL."""
  try:
    # Location of the latest build revision number
    return int(urllib.urlopen(url).read())
  except Exception, e:
    print('Could not determine latest revision. This could be bad...')
    return 999999999


def main():
  usage = ('%prog [options] [-- chromium-options]\n'
           'Perform binary search on the snapshot builds to find a minimal\n'
           'range of revisions where a behavior change happened. The\n'
           'behaviors are described as "good" and "bad".\n'
           'It is NOT assumed that the behavior of the later revision is\n'
           'the bad one.\n'
           '\n'
           'Revision numbers should use\n'
           '  Official versions (e.g. 1.0.1000.0) for official builds. (-o)\n'
           '  SVN revisions (e.g. 123456) for chromium builds, from trunk.\n'
           '    Use base_trunk_revision from http://omahaproxy.appspot.com/\n'
           '    for earlier revs.\n'
           '    Chrome\'s about: build number and omahaproxy branch_revision\n'
           '    are incorrect, they are from branches.\n'
           '\n'
           'Tip: add "-- --no-first-run" to bypass the first run prompts.')
  parser = optparse.OptionParser(usage=usage)
  # Strangely, the default help output doesn't include the choice list.
  choices = ['mac', 'win', 'linux', 'linux64', 'linux-arm']
  # linux-chromiumos lacks a continuous archive http://crbug.com/78158
  parser.add_option('-a', '--archive',
                    choices=choices,
                    help='The buildbot archive to bisect [%s].' %
                         '|'.join(choices))
  parser.add_option('-o', action='store_true', dest='official_builds',
                    help='Bisect across official Chrome builds (internal '
                         'only) instead of Chromium archives.')
  parser.add_option('-b', '--bad', type='str',
                    help='A bad revision to start bisection. '
                         'May be earlier or later than the good revision. '
                         'Default is HEAD.')
  parser.add_option('-g', '--good', type='str',
                    help='A good revision to start bisection. '
                         'May be earlier or later than the bad revision. '
                         'Default is 0.')
  parser.add_option('-p', '--profile', '--user-data-dir', type='str',
                    help='Profile to use; this will not reset every run. '
                         'Defaults to a clean profile.',
                    default='profile')
  parser.add_option('-t', '--times', type='int',
                    help='Number of times to run each build before asking '
                         'if it\'s good or bad. Temporary profiles are '
                         'reused.',
                    default=1)
  parser.add_option('-c', '--command', type='str',
                    help='Command to execute. %p and %a refer to Chrome '
                         'executable and specified extra arguments '
                         'respectively. Use %s to specify all extra arguments '
                         'as one string. Defaults to "%p %a". Note that any '
                         'extra paths specified should be absolute.',
                    default='%p %a')

  (opts, args) = parser.parse_args()

  if opts.archive is None:
    print 'Error: missing required parameter: --archive'
    print
    parser.print_help()
    return 1

  # Create the context. Initialize 0 for the revisions as they are set below.
  context = PathContext(opts.archive, 0, 0, opts.official_builds)
  # Pick a starting point, try to get HEAD for this.
  if opts.bad:
    bad_rev = opts.bad
  else:
    bad_rev = '999.0.0.0'
    if not opts.official_builds:
      bad_rev = GetChromiumRevision(context.GetLastChangeURL())

  # Find out when we were good.
  if opts.good:
    good_rev = opts.good
  else:
    good_rev = '0.0.0.0' if opts.official_builds else 0

  if opts.official_builds:
    good_rev = LooseVersion(good_rev)
    bad_rev = LooseVersion(bad_rev)
  else:
    good_rev = int(good_rev)
    bad_rev = int(bad_rev)

  if opts.times < 1:
    print('Number of times to run (%d) must be greater than or equal to 1.' %
          opts.times)
    parser.print_help()
    return 1

  (min_chromium_rev, max_chromium_rev) = Bisect(
      opts.archive, opts.official_builds, good_rev, bad_rev, opts.times,
      opts.command, args, opts.profile)

  # Get corresponding blink revisions.
  try:
    min_blink_rev = GetBlinkRevisionForChromiumRevision(min_chromium_rev)
    max_blink_rev = GetBlinkRevisionForChromiumRevision(max_chromium_rev)
  except Exception, e:
    # Silently ignore the failure.
    min_blink_rev, max_blink_rev = 0, 0

  # We're done. Let the user know the results in an official manner.
  if good_rev > bad_rev:
    print DONE_MESSAGE_GOOD_MAX % (str(min_chromium_rev), str(max_chromium_rev))
  else:
    print DONE_MESSAGE_GOOD_MIN % (str(min_chromium_rev), str(max_chromium_rev))

  if min_blink_rev != max_blink_rev:
    print 'BLINK CHANGELOG URL:'
    print '  ' + BLINK_CHANGELOG_URL % (max_blink_rev, min_blink_rev)
  print 'CHANGELOG URL:'
  if opts.official_builds:
    print OFFICIAL_CHANGELOG_URL % (min_chromium_rev, max_chromium_rev)
  else:
    print '  ' + CHANGELOG_URL % (min_chromium_rev, max_chromium_rev)


if __name__ == '__main__':
  sys.exit(main())