#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Snapshot Build Bisect Tool

This script bisects a snapshot archive using binary search. It starts at
a bad revision (it will try to guess HEAD) and asks for a last known-good
revision. It will then binary search across this revision range by downloading,
unzipping, and opening Chromium for you. After testing the specific revision,
it will ask you whether it is good or bad before continuing the search.
"""

# The root URL for storage.
BASE_URL = 'http://commondatastorage.googleapis.com/chromium-browser-snapshots'

# The root URL for official builds.
OFFICIAL_BASE_URL = 'http://master.chrome.corp.google.com/official_builds'

# Changelogs URL.
CHANGELOG_URL = 'http://build.chromium.org/f/chromium/' \
                'perf/dashboard/ui/changelog.html?' \
                'url=/trunk/src&range=%d%%3A%d'
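# For example (revision numbers illustrative), CHANGELOG_URL % (190000, 190500)
# yields a URL ending in 'range=190000%3A190500'; the doubled %% above is how a
# literal '%3A' (a URL-encoded ':') survives the %-formatting.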

# Official Changelogs URL.
OFFICIAL_CHANGELOG_URL = 'http://omahaproxy.appspot.com/'\
                         'changelog?old_version=%s&new_version=%s'

# DEPS file URL.
DEPS_FILE = 'http://src.chromium.org/viewvc/chrome/trunk/src/DEPS?revision=%d'
# Blink Changelogs URL.
BLINK_CHANGELOG_URL = 'http://build.chromium.org/f/chromium/' \
                      'perf/dashboard/ui/changelog_blink.html?' \
                      'url=/trunk&range=%d%%3A%d'

DONE_MESSAGE_GOOD_MIN = 'You are probably looking for a change made after %s ' \
                        '(known good), but no later than %s (first known bad).'
DONE_MESSAGE_GOOD_MAX = 'You are probably looking for a change made after %s ' \
                        '(known bad), but no later than %s (first known good).'

###############################################################################

import math
import optparse
import os
import pipes
import re
import shutil
import subprocess
import sys
import tempfile
import threading
import urllib
from distutils.version import LooseVersion
from xml.etree import ElementTree
import zipfile


class PathContext(object):
  """A PathContext is used to carry the information used to construct URLs and
  paths when dealing with the storage server and archives."""
  def __init__(self, platform, good_revision, bad_revision, is_official):
    super(PathContext, self).__init__()
    # Store off the input parameters.
    self.platform = platform  # What's passed in to the '-a/--archive' option.
    self.good_revision = good_revision
    self.bad_revision = bad_revision
    self.is_official = is_official

    # The name of the ZIP file in a revision directory on the server.
    self.archive_name = None

    # Set some internal members:
    #   _listing_platform_dir = Directory that holds revisions. Ends with a '/'.
    #   _archive_extract_dir = Uncompressed directory in the archive_name file.
    #   _binary_name = The name of the executable to run.
    if self.platform in ('linux', 'linux64', 'linux-arm'):
      self._binary_name = 'chrome'
    elif self.platform == 'mac':
      self.archive_name = 'chrome-mac.zip'
      self._archive_extract_dir = 'chrome-mac'
    elif self.platform == 'win':
      self.archive_name = 'chrome-win32.zip'
      self._archive_extract_dir = 'chrome-win32'
      self._binary_name = 'chrome.exe'
    else:
      raise Exception('Invalid platform: %s' % self.platform)

    if is_official:
      if self.platform == 'linux':
        self._listing_platform_dir = 'lucid32bit/'
        self.archive_name = 'chrome-lucid32bit.zip'
        self._archive_extract_dir = 'chrome-lucid32bit'
      elif self.platform == 'linux64':
        self._listing_platform_dir = 'lucid64bit/'
        self.archive_name = 'chrome-lucid64bit.zip'
        self._archive_extract_dir = 'chrome-lucid64bit'
      elif self.platform == 'mac':
        self._listing_platform_dir = 'mac/'
        self._binary_name = 'Google Chrome.app/Contents/MacOS/Google Chrome'
      elif self.platform == 'win':
        self._listing_platform_dir = 'win/'
    else:
      if self.platform in ('linux', 'linux64', 'linux-arm'):
        self.archive_name = 'chrome-linux.zip'
        self._archive_extract_dir = 'chrome-linux'
        if self.platform == 'linux':
          self._listing_platform_dir = 'Linux/'
        elif self.platform == 'linux64':
          self._listing_platform_dir = 'Linux_x64/'
        elif self.platform == 'linux-arm':
          self._listing_platform_dir = 'Linux_ARM_Cross-Compile/'
      elif self.platform == 'mac':
        self._listing_platform_dir = 'Mac/'
        self._binary_name = 'Chromium.app/Contents/MacOS/Chromium'
      elif self.platform == 'win':
        self._listing_platform_dir = 'Win/'

  def GetListingURL(self, marker=None):
    """Returns the URL for a directory listing, with an optional marker."""
    marker_param = ''
    if marker:
      marker_param = '&marker=' + str(marker)
    return BASE_URL + '/?delimiter=/&prefix=' + self._listing_platform_dir + \
        marker_param

  def GetDownloadURL(self, revision):
    """Gets the download URL for a build archive of a specific revision."""
    if self.is_official:
      return "%s/%s/%s%s" % (
          OFFICIAL_BASE_URL, revision, self._listing_platform_dir,
          self.archive_name)
    else:
      return "%s/%s%s/%s" % (
          BASE_URL, self._listing_platform_dir, revision, self.archive_name)
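
  # Sketch of the two URL shapes produced above (revision values illustrative,
  # derived from the constants and directory names set elsewhere in this file):
  #   snapshot:  BASE_URL + '/Linux_x64/190000/chrome-linux.zip'
  #   official:  OFFICIAL_BASE_URL + '/30.0.1599.0/win/chrome-win32.zip'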

  def GetLastChangeURL(self):
    """Returns a URL to the LAST_CHANGE file."""
    return BASE_URL + '/' + self._listing_platform_dir + 'LAST_CHANGE'

  def GetLaunchPath(self):
    """Returns a relative path (presumably from the archive extraction location)
    that is used to run the executable."""
    return os.path.join(self._archive_extract_dir, self._binary_name)
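
  # For example, with the default Chromium archives this comes out as
  # 'chrome-linux/chrome' on Linux and
  # 'chrome-mac/Chromium.app/Contents/MacOS/Chromium' on Mac, per the values
  # assigned in __init__() above.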

  def ParseDirectoryIndex(self):
    """Parses the Google Storage directory listing into a list of revision
    numbers."""

    def _FetchAndParse(url):
      """Fetches a URL and returns a 2-Tuple of ([revisions], next-marker). If
      next-marker is not None, then the listing is a partial listing and another
      fetch should be performed with next-marker being the marker= GET
      parameter."""
      handle = urllib.urlopen(url)
      document = ElementTree.parse(handle)

      # All nodes in the tree are namespaced. Get the root's tag name to extract
      # the namespace. Etree does namespaces as |{namespace}tag|.
      root_tag = document.getroot().tag
      end_ns_pos = root_tag.find('}')
      if end_ns_pos == -1:
        raise Exception("Could not locate end namespace for directory index")
      namespace = root_tag[:end_ns_pos + 1]

      # Find the prefix (_listing_platform_dir) and whether or not the list is
      # truncated.
      prefix_len = len(document.find(namespace + 'Prefix').text)
      next_marker = None
      is_truncated = document.find(namespace + 'IsTruncated')
      if is_truncated is not None and is_truncated.text.lower() == 'true':
        next_marker = document.find(namespace + 'NextMarker').text

      # Get a list of all the revisions.
      all_prefixes = document.findall(namespace + 'CommonPrefixes/' +
                                      namespace + 'Prefix')
      # The <Prefix> nodes have content of the form of
      # |_listing_platform_dir/revision/|. Strip off the platform dir and the
      # trailing slash to just have a number.
      revisions = []
      for prefix in all_prefixes:
        revnum = prefix.text[prefix_len:-1]
        try:
          revnum = int(revnum)
          revisions.append(revnum)
        except ValueError:
          pass
      return (revisions, next_marker)

    # Fetch the first list of revisions.
    (revisions, next_marker) = _FetchAndParse(self.GetListingURL())

    # If the result list was truncated, refetch with the next marker. Do this
    # until an entire directory listing is done.
    while next_marker:
      next_url = self.GetListingURL(next_marker)
      (new_revisions, next_marker) = _FetchAndParse(next_url)
      revisions.extend(new_revisions)
    return revisions
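
  # Rough sketch of the XML _FetchAndParse() reads above, inferred from the
  # parsing code itself and trimmed to the elements it actually looks at
  # (revision values illustrative):
  #
  #   <ListBucketResult xmlns="...">
  #     <Prefix>Linux_x64/</Prefix>
  #     <IsTruncated>true</IsTruncated>
  #     <NextMarker>Linux_x64/190123/</NextMarker>
  #     <CommonPrefixes><Prefix>Linux_x64/190000/</Prefix></CommonPrefixes>
  #   </ListBucketResult>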

  def GetRevList(self):
    """Gets the list of revision numbers between self.good_revision and
    self.bad_revision."""
    # Download the revlist and filter for just the range between good and bad.
    minrev = min(self.good_revision, self.bad_revision)
    maxrev = max(self.good_revision, self.bad_revision)
    revlist = map(int, self.ParseDirectoryIndex())
    revlist = [x for x in revlist if x >= int(minrev) and x <= int(maxrev)]
    revlist.sort()
    return revlist

  def GetOfficialBuildsList(self):
    """Gets the list of official build numbers between self.good_revision and
    self.bad_revision."""
    # Download the revlist and filter for just the range between good and bad.
    minrev = min(self.good_revision, self.bad_revision)
    maxrev = max(self.good_revision, self.bad_revision)
    handle = urllib.urlopen(OFFICIAL_BASE_URL)
    dirindex = handle.read()
    handle.close()
    build_numbers = re.findall(r'<a href="([0-9][0-9].*)/">', dirindex)
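    # A matching index entry looks roughly like '<a href="30.0.1599.0/">'
    # (version shown is illustrative); the capture group is the build number.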
    final_list = []
    i = 0
    parsed_build_numbers = [LooseVersion(x) for x in build_numbers]
    for build_number in sorted(parsed_build_numbers):
      path = OFFICIAL_BASE_URL + '/' + str(build_number) + '/' + \
             self._listing_platform_dir + self.archive_name
      i = i + 1
      try:
        connection = urllib.urlopen(path)
        connection.close()
        if build_number > maxrev:
          break
        if build_number >= minrev:
          final_list.append(str(build_number))
      except urllib.HTTPError, e:
        pass
    return final_list

def UnzipFilenameToDir(filename, dir):
  """Unzip |filename| to directory |dir|."""
  cwd = os.getcwd()
  if not os.path.isabs(filename):
    filename = os.path.join(cwd, filename)
  zf = zipfile.ZipFile(filename)
  # Make base.
  if not os.path.isdir(dir):
    os.mkdir(dir)
  os.chdir(dir)
  # Extract files.
  for info in zf.infolist():
    name = info.filename
    if name.endswith('/'):  # dir
      if not os.path.isdir(name):
        os.makedirs(name)
    else:  # file
      dir = os.path.dirname(name)
      if not os.path.isdir(dir):
        os.makedirs(dir)
      out = open(name, 'wb')
      out.write(zf.read(name))
      out.close()
    # Set permissions. Permission info in external_attr is shifted 16 bits.
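    # (Illustration: for archives built on Unix-like systems the high 16 bits
    # of external_attr carry the original st_mode, so a file zipped with mode
    # 0755 comes back out with its execute bits intact.)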
    os.chmod(name, info.external_attr >> 16L)
  os.chdir(cwd)


def FetchRevision(context, rev, filename, quit_event=None, progress_event=None):
  """Downloads and unzips revision |rev|.
  @param context A PathContext instance.
  @param rev The Chromium revision number/tag to download.
  @param filename The destination for the downloaded file.
  @param quit_event A threading.Event which will be set by the master thread to
                    indicate that the download should be aborted.
  @param progress_event A threading.Event which will be set by the master thread
                        to indicate that the progress of the download should be
                        displayed.
  """
  def ReportHook(blocknum, blocksize, totalsize):
    if quit_event and quit_event.isSet():
      raise RuntimeError("Aborting download of revision %s" % str(rev))
    if progress_event and progress_event.isSet():
      size = blocknum * blocksize
      if totalsize == -1:  # Total size not known.
        progress = "Received %d bytes" % size
      else:
        size = min(totalsize, size)
        progress = "Received %d of %d bytes, %.2f%%" % (
            size, totalsize, 100.0 * size / totalsize)
      # Send a \r to let all progress messages use just one line of output.
      sys.stdout.write("\r" + progress)
      sys.stdout.flush()

  download_url = context.GetDownloadURL(rev)
  try:
    urllib.urlretrieve(download_url, filename, ReportHook)
    if progress_event and progress_event.isSet():
      print
  except RuntimeError, e:
    pass


def RunRevision(context, revision, zipfile, profile, num_runs, command, args):
  """Given a zipped revision, unzip it and run the test."""
  print "Trying revision %s..." % str(revision)

  # Create a temp directory and unzip the revision into it.
  cwd = os.getcwd()
  tempdir = tempfile.mkdtemp(prefix='bisect_tmp')
  UnzipFilenameToDir(zipfile, tempdir)
  os.chdir(tempdir)

  # Run the build as many times as specified.
  testargs = ['--user-data-dir=%s' % profile] + args
  # The sandbox must be run as root on Official Chrome, so bypass it.
  if context.is_official and context.platform.startswith('linux'):
    testargs.append('--no-sandbox')

  runcommand = []
  for token in command.split():
    if token == "%a":
      runcommand.extend(testargs)
    else:
      runcommand.append( \
          token.replace('%p', context.GetLaunchPath()) \
          .replace('%s', ' '.join(testargs)))
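
  # Illustration of the substitution above (paths and profile name are
  # examples, not guarantees): with the default command '%p %a', a Linux
  # snapshot build and no extra args, runcommand becomes
  #   ['chrome-linux/chrome', '--user-data-dir=profile']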

  for i in range(0, num_runs):
    subproc = subprocess.Popen(runcommand,
                               bufsize=-1,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    (stdout, stderr) = subproc.communicate()

  os.chdir(cwd)
  try:
    shutil.rmtree(tempdir, True)
  except Exception, e:
    pass

  return (subproc.returncode, stdout, stderr)


def AskIsGoodBuild(rev, official_builds, status, stdout, stderr):
  """Ask the user whether build |rev| is good or bad."""
  # Loop until we get a response that we can parse.
  while True:
    response = raw_input('Revision %s is [(g)ood/(b)ad/(u)nknown/(q)uit]: ' %
                         str(rev))
    if response and response in ('g', 'b', 'u'):
      return response
    if response and response == 'q':
      raise SystemExit()


class DownloadJob(object):
  """DownloadJob represents a task to download a given Chromium revision."""
  def __init__(self, context, name, rev, zipfile):
    super(DownloadJob, self).__init__()
    # Store off the input parameters.
    self.context = context
    self.name = name
    self.rev = rev
    self.zipfile = zipfile
    self.quit_event = threading.Event()
    self.progress_event = threading.Event()

  def Start(self):
    """Starts the download."""
    fetchargs = (self.context,
                 self.rev,
                 self.zipfile,
                 self.quit_event,
                 self.progress_event)
    self.thread = threading.Thread(target=FetchRevision,
                                   name=self.name,
                                   args=fetchargs)
    self.thread.start()

  def Stop(self):
    """Stops the download which must have been started previously."""
    self.quit_event.set()
    self.thread.join()
    os.unlink(self.zipfile)

  def WaitFor(self):
    """Prints a message and waits for the download to complete. The download
    must have been started previously."""
    print "Downloading revision %s..." % str(self.rev)
    self.progress_event.set()  # Display progress of download.
    self.thread.join()
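
# Sketch of how Bisect() below drives a DownloadJob (it mirrors the real call
# sites; the variable names are the ones used there):
#
#   fetch = DownloadJob(context, 'initial_fetch', rev, zipfile)
#   fetch.Start()
#   fetch.WaitFor()  # block, showing download progress
#   ...              # or fetch.Stop() to abort and delete the downloaded zip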


def Bisect(platform,
           official_builds,
           good_rev=0,
           bad_rev=0,
           num_runs=1,
           command="%p %a",
           try_args=(),
           profile=None,
           evaluate=AskIsGoodBuild):
  """Given known good and known bad revisions, run a binary search on all
  archived revisions to determine the last known good revision.

  @param platform Which build to download/run ('mac', 'win', 'linux64', etc.).
  @param official_builds Specify build type (Chromium or Official build).
  @param good_rev Number/tag of the known good revision.
  @param bad_rev Number/tag of the known bad revision.
  @param num_runs Number of times to run each build for asking good/bad.
  @param try_args A tuple of arguments to pass to the test application.
  @param profile The name of the user profile to run with.
  @param evaluate A function which returns 'g' if the argument build is good,
                  'b' if it's bad or 'u' if unknown.

  Threading is used to fetch Chromium revisions in the background, speeding up
  the user's experience. For example, suppose the bounds of the search are
  good_rev=0, bad_rev=100. The first revision to be checked is 50. Depending on
  whether revision 50 is good or bad, the next revision to check will be either
  25 or 75. So, while revision 50 is being checked, the script will download
  revisions 25 and 75 in the background. Once the good/bad verdict on rev 50 is
  known:

    - If rev 50 is good, the download of rev 25 is cancelled, and the next test
      is run on rev 75.

    - If rev 50 is bad, the download of rev 75 is cancelled, and the next test
      is run on rev 25.
  """

  if not profile:
    profile = 'profile'

  context = PathContext(platform, good_rev, bad_rev, official_builds)
  cwd = os.getcwd()

  print "Downloading list of known revisions..."
  _GetDownloadPath = lambda rev: os.path.join(cwd,
      '%s-%s' % (str(rev), context.archive_name))
  if official_builds:
    revlist = context.GetOfficialBuildsList()
  else:
    revlist = context.GetRevList()

  # Get a list of revisions to bisect across.
  if len(revlist) < 2:  # Don't have enough builds to bisect.
    msg = 'We don\'t have enough builds to bisect. revlist: %s' % revlist
    raise RuntimeError(msg)

  # Figure out our bookends and first pivot point; fetch the pivot revision.
  minrev = 0
  maxrev = len(revlist) - 1
  pivot = maxrev / 2
  rev = revlist[pivot]
  zipfile = _GetDownloadPath(rev)
  fetch = DownloadJob(context, 'initial_fetch', rev, zipfile)
  fetch.Start()
  fetch.WaitFor()

  # Binary search time!
  while fetch and fetch.zipfile and maxrev - minrev > 1:
    if bad_rev < good_rev:
      min_str, max_str = "bad", "good"
    else:
      min_str, max_str = "good", "bad"
    print 'Bisecting range [%s (%s), %s (%s)].' % (revlist[minrev], min_str, \
                                                   revlist[maxrev], max_str)

    # Pre-fetch next two possible pivots
    #  - down_pivot is the next revision to check if the current revision turns
    #    out to be bad.
    #  - up_pivot is the next revision to check if the current revision turns
    #    out to be good.
    down_pivot = int((pivot - minrev) / 2) + minrev
    down_fetch = None
    if down_pivot != pivot and down_pivot != minrev:
      down_rev = revlist[down_pivot]
      down_fetch = DownloadJob(context, 'down_fetch', down_rev,
                               _GetDownloadPath(down_rev))
      down_fetch.Start()

    up_pivot = int((maxrev - pivot) / 2) + pivot
    up_fetch = None
    if up_pivot != pivot and up_pivot != maxrev:
      up_rev = revlist[up_pivot]
      up_fetch = DownloadJob(context, 'up_fetch', up_rev,
                             _GetDownloadPath(up_rev))
      up_fetch.Start()

    # Run test on the pivot revision.
    status = None
    stdout = None
    stderr = None
    try:
      (status, stdout, stderr) = RunRevision(context,
                                             rev,
                                             fetch.zipfile,
                                             profile,
                                             num_runs,
                                             command,
                                             try_args)
    except Exception, e:
      print >>sys.stderr, e
    fetch.Stop()
    fetch = None

    # Call the evaluate function to see if the current revision is good or bad.
    # On that basis, kill one of the background downloads and complete the
    # other, as described in the comments above.
    try:
      answer = evaluate(rev, official_builds, status, stdout, stderr)
      if answer == 'g' and good_rev < bad_rev or \
          answer == 'b' and bad_rev < good_rev:
        minrev = pivot
        if down_fetch:
          down_fetch.Stop()  # Kill the download of the older revision.
        if up_fetch:
          up_fetch.WaitFor()
        pivot = up_pivot
        fetch = up_fetch
      elif answer == 'b' and good_rev < bad_rev or \
          answer == 'g' and bad_rev < good_rev:
        maxrev = pivot
        if up_fetch:
          up_fetch.Stop()  # Kill the download of the newer revision.
        if down_fetch:
          down_fetch.WaitFor()
        pivot = down_pivot
        fetch = down_fetch
      elif answer == 'u':
        # Nuke the revision from the revlist and choose a new pivot.
        revlist.pop(pivot)
        maxrev -= 1  # Assumes maxrev >= pivot.

        if maxrev - minrev > 1:
          # Alternate between using down_pivot or up_pivot for the new pivot
          # point, without affecting the range. Do this instead of setting the
          # pivot to the midpoint of the new range because adjacent revisions
          # are likely affected by the same issue that caused the (u)nknown
          # response.
          if up_fetch and down_fetch:
            fetch = [up_fetch, down_fetch][len(revlist) % 2]
          elif up_fetch:
            fetch = up_fetch
          else:
            fetch = down_fetch
          fetch.WaitFor()
          if fetch == up_fetch:
            pivot = up_pivot - 1  # Subtracts 1 because revlist was resized.
          else:
            pivot = down_pivot
          zipfile = fetch.zipfile

        if down_fetch and fetch != down_fetch:
          down_fetch.Stop()
        if up_fetch and fetch != up_fetch:
          up_fetch.Stop()
      else:
        assert False, "Unexpected return value from evaluate(): " + answer
    except SystemExit:
      print "Cleaning up..."
      for f in [_GetDownloadPath(revlist[down_pivot]),
                _GetDownloadPath(revlist[up_pivot])]:
        try:
          os.unlink(f)
        except OSError:
          pass
      sys.exit(0)

    rev = revlist[pivot]

  return (revlist[minrev], revlist[maxrev])


def GetBlinkRevisionForChromiumRevision(rev):
  """Returns the blink revision that was in chromium's DEPS file at
  chromium revision |rev|."""
  # . doesn't match newlines without re.DOTALL, so this is safe.
  blink_re = re.compile(r'webkit_revision.:\D*(\d+)')
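  # The DEPS entry this is meant to match looks something like (revision value
  # illustrative):
  #   "webkit_revision": "151000",
  # in which case group(1) captures the digits.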
  url = urllib.urlopen(DEPS_FILE % rev)
  m = blink_re.search(url.read())
  url.close()
  if m:
    return int(m.group(1))
  else:
    raise Exception('Could not get blink revision for cr rev %d' % rev)


def GetChromiumRevision(url):
  """Returns the chromium revision read from given URL."""
  try:
    # Location of the latest build revision number
    return int(urllib.urlopen(url).read())
  except Exception, e:
    print('Could not determine latest revision. This could be bad...')
    return 999999999


def main():
  usage = ('%prog [options] [-- chromium-options]\n'
           'Perform binary search on the snapshot builds to find a minimal\n'
           'range of revisions where a behavior change happened. The\n'
           'behaviors are described as "good" and "bad".\n'
           'It is NOT assumed that the behavior of the later revision is\n'
           'the bad one.\n'
           '\n'
           'Revision numbers should use\n'
           '  Official versions (e.g. 1.0.1000.0) for official builds. (-o)\n'
           '  SVN revisions (e.g. 123456) for chromium builds, from trunk.\n'
           '    Use base_trunk_revision from http://omahaproxy.appspot.com/\n'
           '    for earlier revs.\n'
           '    Chrome\'s about: build number and omahaproxy branch_revision\n'
           '    are incorrect, they are from branches.\n'
           '\n'
           'Tip: add "-- --no-first-run" to bypass the first run prompts.')
  parser = optparse.OptionParser(usage=usage)
  # Strangely, the default help output doesn't include the choice list.
  choices = ['mac', 'win', 'linux', 'linux64', 'linux-arm']
  # linux-chromiumos lacks a continuous archive http://crbug.com/78158
  parser.add_option('-a', '--archive',
                    choices = choices,
                    help = 'The buildbot archive to bisect [%s].' %
                           '|'.join(choices))
  parser.add_option('-o', action="store_true", dest='official_builds',
                    help = 'Bisect across official ' +
                           'Chrome builds (internal only) instead of ' +
                           'Chromium archives.')
  parser.add_option('-b', '--bad', type = 'str',
                    help = 'A bad revision to start bisection. ' +
                           'May be earlier or later than the good revision. ' +
                           'Default is HEAD.')
  parser.add_option('-g', '--good', type = 'str',
                    help = 'A good revision to start bisection. ' +
                           'May be earlier or later than the bad revision. ' +
                           'Default is 0.')
  parser.add_option('-p', '--profile', '--user-data-dir', type = 'str',
                    help = 'Profile to use; this will not reset every run. ' +
                           'Defaults to a clean profile.', default = 'profile')
  parser.add_option('-t', '--times', type = 'int',
                    help = 'Number of times to run each build before asking ' +
                           'if it\'s good or bad. Temporary profiles are reused.',
                    default = 1)
  parser.add_option('-c', '--command', type = 'str',
                    help = 'Command to execute. %p and %a refer to Chrome ' +
                           'executable and specified extra arguments respectively. ' +
                           'Use %s to specify all extra arguments as one string. ' +
                           'Defaults to "%p %a". Note that any extra paths ' +
                           'specified should be absolute.',
                    default = '%p %a')
  (opts, args) = parser.parse_args()

  if opts.archive is None:
    print 'Error: missing required parameter: --archive'
    print
    parser.print_help()
    return 1

  # Create the context. Initialize 0 for the revisions as they are set below.
  context = PathContext(opts.archive, 0, 0, opts.official_builds)
  # Pick a starting point, try to get HEAD for this.
  if opts.bad:
    bad_rev = opts.bad
  else:
    bad_rev = '999.0.0.0'
    if not opts.official_builds:
      bad_rev = GetChromiumRevision(context.GetLastChangeURL())

  # Find out when we were good.
  if opts.good:
    good_rev = opts.good
  else:
    good_rev = '0.0.0.0' if opts.official_builds else 0

  if opts.official_builds:
    good_rev = LooseVersion(good_rev)
    bad_rev = LooseVersion(bad_rev)
  else:
    good_rev = int(good_rev)
    bad_rev = int(bad_rev)

  if opts.times < 1:
    print('Number of times to run (%d) must be greater than or equal to 1.' %
          opts.times)
    parser.print_help()
    return 1

  (min_chromium_rev, max_chromium_rev) = Bisect(
      opts.archive, opts.official_builds, good_rev, bad_rev, opts.times,
      opts.command, args, opts.profile)

  # Get corresponding blink revisions.
  try:
    min_blink_rev = GetBlinkRevisionForChromiumRevision(min_chromium_rev)
    max_blink_rev = GetBlinkRevisionForChromiumRevision(max_chromium_rev)
  except Exception, e:
    # Silently ignore the failure.
    min_blink_rev, max_blink_rev = 0, 0

  # We're done. Let the user know the results in an official manner.
  if good_rev > bad_rev:
    print DONE_MESSAGE_GOOD_MAX % (str(min_chromium_rev), str(max_chromium_rev))
  else:
    print DONE_MESSAGE_GOOD_MIN % (str(min_chromium_rev), str(max_chromium_rev))

  if min_blink_rev != max_blink_rev:
    print 'BLINK CHANGELOG URL:'
    print '  ' + BLINK_CHANGELOG_URL % (max_blink_rev, min_blink_rev)
  print 'CHANGELOG URL:'
  if opts.official_builds:
    print OFFICIAL_CHANGELOG_URL % (min_chromium_rev, max_chromium_rev)
  else:
    print '  ' + CHANGELOG_URL % (min_chromium_rev, max_chromium_rev)

if __name__ == '__main__':
  sys.exit(main())