# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Rebase DumpAccessibilityTree Tests.

This script is intended to be run when you make a change that could affect the
expected results of tests in:

    content/test/data/accessibility

It assumes that you've already uploaded a change and the try jobs have finished.
It collects all of the results from try jobs on all platforms and updates the
expectation files locally. From there you can run 'git diff' to make sure all
of the changes look reasonable, then upload the change for code review.
"""
import os
import re
import sys
import urllib

# Load BeautifulSoup. It's checked into two places in the Chromium tree.
sys.path.append(
    'third_party/trace-viewer/third_party/tvcm/third_party/beautifulsoup')
from BeautifulSoup import BeautifulSoup
# The location of the DumpAccessibilityTree html test files and expectations.
TEST_DATA_PATH = os.path.join(os.getcwd(), 'content/test/data/accessibility')
# A global that keeps track of files we've already updated, so we don't
# bother to update the same file twice.
completed_files = set()
def GitClIssue():
  '''Retrieve the current issue number as a string.

  NOTE(review): the def line was lost in this chunk; the name GitClIssue is
  reconstructed — confirm against callers.
  '''
  result = os.popen('git cl issue').read()
  # Output looks like: 'Issue number: 12345 (https://...)', so the issue
  # number is the third whitespace-separated token.
  return result.split()[2]
43 def ParseFailure(name
, url
):
44 '''Parse given the name of a failing trybot and the url of its build log.'''
46 # Figure out the platform.
47 if name
.find('android') >= 0:
48 platform_suffix
= '-expected-android.txt'
49 elif name
.find('mac') >= 0:
50 platform_suffix
= '-expected-mac.txt'
51 elif name
.find('win') >= 0:
52 platform_suffix
= '-expected-win.txt'
56 # Read the content_browsertests log file.
61 '/steps/content_browsertests%20(with%20patch)/logs/stdio/text',
62 '/steps/content_browsertests/logs/stdio/text']:
63 urls
.append(url
+ url_suffix
)
65 response
= urllib
.urlopen(url
)
66 if response
.getcode() == 200:
67 data
= response
.read()
68 lines
= data
.splitlines()
74 # Parse the log file for failing tests and overwrite the expected
75 # result file locally with the actual results from the log.
79 for i
in range(len(lines
)):
81 if line
[:12] == '[ RUN ]':
83 if test_name
and line
[:8] == 'Testing:':
84 filename
= re
.search('content.test.*accessibility.(.*)', line
).group(1)
85 if test_name
and line
== 'Actual':
87 if start
and test_name
and filename
and line
[:12] == '[ FAILED ]':
88 # Get the path to the html file.
89 dst_fullpath
= os
.path
.join(TEST_DATA_PATH
, filename
)
90 # Strip off .html and replace it with the platform expected suffix.
91 dst_fullpath
= dst_fullpath
[:-5] + platform_suffix
92 if dst_fullpath
in completed_files
:
95 actual
= [line
for line
in lines
[start
: i
- 1] if line
]
96 fp
= open(dst_fullpath
, 'w')
97 fp
.write('\n'.join(actual
))
100 completed_files
.add(dst_fullpath
)
105 def ParseTrybots(data
):
106 '''Parse the code review page to find links to try bots.'''
107 soup
= BeautifulSoup(data
)
108 failures
= soup
.findAll(
110 { "class" : "build-result build-status-color-failure" })
111 print 'Found %d trybots that failed' % len(failures
)
113 name
= f
.text
.replace(' ', '')
115 ParseFailure(name
, url
)
118 '''Main. Get the issue number and parse the code review page.'''
119 if len(sys
.argv
) == 2:
124 url
= 'https://codereview.chromium.org/%s' % issue
125 print 'Fetching issue from %s' % url
126 response
= urllib
.urlopen(url
)
127 if response
.getcode() != 200:
128 print 'Error code %d accessing url: %s' % (response
.getcode(), url
)
129 data
= response
.read()
if __name__ == '__main__':
  Run()