Extend validate_failures.py to run outside the build directory.
contrib/testsuite-management/validate_failures.py
#!/usr/bin/python2.6

# Script to compare testsuite failures against a list of known-to-fail
# tests.

# Contributed by Diego Novillo <dnovillo@google.com>

# Copyright (C) 2011, 2012 Free Software Foundation, Inc.

# This file is part of GCC.

# GCC is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.

# GCC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING.  If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
27 """This script provides a coarser XFAILing mechanism that requires no
28 detailed DejaGNU markings. This is useful in a variety of scenarios:
30 - Development branches with many known failures waiting to be fixed.
31 - Release branches with known failures that are not considered
32 important for the particular release criteria used in that branch.
34 The script must be executed from the toplevel build directory. When
35 executed it will:
37 1- Determine the target built: TARGET
38 2- Determine the source directory: SRCDIR
39 3- Look for a failure manifest file in
40 <SRCDIR>/contrib/testsuite-management/<TARGET>.xfail
41 4- Collect all the <tool>.sum files from the build tree.
42 5- Produce a report stating:
43 a- Failures expected in the manifest but not present in the build.
44 b- Failures in the build not expected in the manifest.
45 6- If all the build failures are expected in the manifest, it exits
46 with exit code 0. Otherwise, it exits with error code 1.
47 """
import optparse
import os
import re
import sys

# Handled test results.
_VALID_TEST_RESULTS = [ 'FAIL', 'UNRESOLVED', 'XPASS', 'ERROR' ]

# Pattern for naming manifest files.  The first argument should be
# the toplevel GCC source directory.  The second argument is the
# target triple used during the build.
_MANIFEST_PATH_PATTERN = '%s/contrib/testsuite-management/%s.xfail'
def Error(msg):
  print >>sys.stderr, '\nerror: %s' % msg
  sys.exit(1)

class TestResult(object):
  """Describes a single DejaGNU test result as emitted in .sum files.

  We are only interested in representing unsuccessful tests.  So, only
  a subset of all the tests are loaded.

  The summary line used to build the test result should have this format:

  attrlist | XPASS: gcc.dg/unroll_1.c (test for excess errors)
  ^^^^^^^^   ^^^^^  ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
  optional   state  name              description
  attributes

  Attributes:
    attrlist: A comma separated list of attributes.
      Valid values:
        flaky            Indicates that this test may not always fail.  These
                         tests are reported, but their presence does not affect
                         the results.
        expire=YYYYMMDD  After this date, this test will produce an error
                         whether it is in the manifest or not.

    state: One of FAIL, UNRESOLVED, XPASS or ERROR.
    name: File name for the test.
    description: String describing the test (flags used, dejagnu message, etc.)
  """
  def __init__(self, summary_line):
    try:
      self.attrs = ''
      if '|' in summary_line:
        (self.attrs, summary_line) = summary_line.split('|', 1)
      try:
        (self.state,
         self.name,
         self.description) = re.match(r' *([A-Z]+): (\S+)\s(.*)',
                                      summary_line).groups()
      except:
        print 'Failed to parse summary line: "%s"' % summary_line
        raise
      self.attrs = self.attrs.strip()
      self.state = self.state.strip()
      self.description = self.description.strip()
    except ValueError:
      Error('Cannot parse summary line "%s"' % summary_line)

    if self.state not in _VALID_TEST_RESULTS:
      Error('Invalid test result %s in "%s" (parsed as "%s")' % (
            self.state, summary_line, self))
  def __lt__(self, other):
    return self.name < other.name

  def __hash__(self):
    return hash(self.state) ^ hash(self.name) ^ hash(self.description)

  def __eq__(self, other):
    return (self.state == other.state and
            self.name == other.name and
            self.description == other.description)

  def __ne__(self, other):
    return not (self == other)

  def __str__(self):
    attrs = ''
    if self.attrs:
      attrs = '%s | ' % self.attrs
    return '%s%s: %s %s' % (attrs, self.state, self.name, self.description)

def GetMakefileValue(makefile_name, value_name):
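  """Return the value assigned to VALUE_NAME in MAKEFILE_NAME, if any.

  The first line starting with VALUE_NAME is split at '=' and the
  right-hand side is returned; if no such line exists, returns None.
  """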
  if os.path.exists(makefile_name):
    with open(makefile_name) as makefile:
      for line in makefile:
        if line.startswith(value_name):
          (_, value) = line.split('=', 1)
          value = value.strip()
          return value
  return None

def ValidBuildDirectory(builddir, target):
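  """Return True if BUILDDIR looks like a GCC toplevel build directory.

  The directory must exist, contain a Makefile, and contain either a
  '<target>' or a 'build-<target>' subdirectory.
  """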
  if (not os.path.exists(builddir) or
      not os.path.exists('%s/Makefile' % builddir) or
      (not os.path.exists('%s/build-%s' % (builddir, target)) and
       not os.path.exists('%s/%s' % (builddir, target)))):
    return False
  return True

def IsInterestingResult(line):
  """Return True if the given line is one of the summary lines we care about."""
  line = line.strip()
  if line.startswith('#'):
    return False
  if '|' in line:
    (_, line) = line.split('|', 1)
    line = line.strip()
  for result in _VALID_TEST_RESULTS:
    if line.startswith(result):
      return True
  return False

def ParseSummary(sum_fname):
  """Create a set of TestResult instances from the given summary file."""
  result_set = set()
  with open(sum_fname) as sum_file:
    for line in sum_file:
      if IsInterestingResult(line):
        result_set.add(TestResult(line))
  return result_set

def GetManifest(manifest_name):
  """Build a set of expected failures from the manifest file.

  Each entry in the manifest file should have the format understood
  by the TestResult constructor.

  If no manifest file exists for this target, it returns an empty
  set.
  """
  if os.path.exists(manifest_name):
    return ParseSummary(manifest_name)
  else:
    return set()

def GetSumFiles(builddir):
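  """Return the list of .sum files found under BUILDDIR.

  Subversion administrative directories ('.svn') are skipped.
  """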
  sum_files = []
  for root, dirs, files in os.walk(builddir):
    if '.svn' in dirs:
      dirs.remove('.svn')
    for fname in files:
      if fname.endswith('.sum'):
        sum_files.append(os.path.join(root, fname))
  return sum_files

def GetResults(sum_files):
  """Collect all the test results from the given .sum files."""
  build_results = set()
  for sum_fname in sum_files:
    print '\t%s' % sum_fname
    build_results |= ParseSummary(sum_fname)
  return build_results

def CompareResults(manifest, actual):
  """Compare sets of results and return two sets:
     - Set of results present in ACTUAL but missing from MANIFEST.
     - Set of results present in MANIFEST but missing from ACTUAL.
  """
  # Report all the actual results not present in the manifest.
  actual_vs_manifest = set()
  for actual_result in actual:
    if actual_result not in manifest:
      actual_vs_manifest.add(actual_result)

  # Similarly for all the tests in the manifest.
  manifest_vs_actual = set()
  for expected_result in manifest:
    # Ignore tests marked flaky.
    if 'flaky' in expected_result.attrs:
      continue
    if expected_result not in actual:
      manifest_vs_actual.add(expected_result)

  return actual_vs_manifest, manifest_vs_actual

def GetBuildData(options):
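  """Return the source directory, target triple and a validity flag.

  Both values are read from the toplevel Makefile in OPTIONS.build_dir;
  the script aborts if that directory does not look like a GCC build
  directory.
  """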
  target = GetMakefileValue('%s/Makefile' % options.build_dir, 'target_alias=')
  srcdir = GetMakefileValue('%s/Makefile' % options.build_dir, 'srcdir =')
  if not ValidBuildDirectory(options.build_dir, target):
    Error('%s is not a valid GCC top level build directory.' %
          options.build_dir)
  print 'Source directory: %s' % srcdir
  print 'Build target: %s' % target
  return srcdir, target, True

def PrintSummary(msg, summary):
  print '\n\n%s' % msg
  for result in sorted(summary):
    print result

def CheckExpectedResults(options):
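  """Check the actual failures against the manifest of expected failures.

  Returns True if every failure found in the results is listed in the
  manifest, False otherwise.
  """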
  if not options.manifest:
    (srcdir, target, valid_build) = GetBuildData(options)
    if not valid_build:
      return False
    manifest_name = _MANIFEST_PATH_PATTERN % (srcdir, target)
  else:
    manifest_name = options.manifest
    if not os.path.exists(manifest_name):
      Error('Manifest file %s does not exist.' % manifest_name)

  print 'Manifest: %s' % manifest_name
  manifest = GetManifest(manifest_name)

  if not options.results:
    print 'Getting actual results from build'
    sum_files = GetSumFiles(options.build_dir)
  else:
    print 'Getting actual results from user-provided results'
    sum_files = options.results.split()
  actual = GetResults(sum_files)

  if options.verbosity >= 1:
    PrintSummary('Tests expected to fail', manifest)
    PrintSummary('\nActual test results', actual)

  actual_vs_manifest, manifest_vs_actual = CompareResults(manifest, actual)

  tests_ok = True
  if len(actual_vs_manifest) > 0:
    PrintSummary('Build results not in the manifest', actual_vs_manifest)
    tests_ok = False

  if not options.ignore_missing_failures and len(manifest_vs_actual) > 0:
    PrintSummary('Manifest results not present in the build'
                 '\n\nNOTE: This is not a failure.  It just means that the '
                 'manifest expected\nthese tests to fail, '
                 'but they worked in this configuration.\n',
                 manifest_vs_actual)

  if tests_ok:
    print '\nSUCCESS: No unexpected failures.'

  return tests_ok

def ProduceManifest(options):
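  """Write a manifest file with the failures found in the build directory.

  An existing manifest is only overwritten when --force is given.
  Returns True on success.
  """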
  (srcdir, target, valid_build) = GetBuildData(options)
  if not valid_build:
    return False

  manifest_name = _MANIFEST_PATH_PATTERN % (srcdir, target)
  if os.path.exists(manifest_name) and not options.force:
    Error('Manifest file %s already exists.\nUse --force to overwrite.' %
          manifest_name)

  # GetResults() expects a list of .sum files, so collect them first.
  sum_files = GetSumFiles(options.build_dir)
  actual = GetResults(sum_files)
  with open(manifest_name, 'w') as manifest_file:
    for result in sorted(actual):
      print result
      manifest_file.write('%s\n' % result)

  return True

def Main(argv):
  parser = optparse.OptionParser(usage=__doc__)

  # Keep the following list sorted by option name.
  parser.add_option('--build_dir', action='store', type='string',
                    dest='build_dir', default='.',
                    help='Build directory to check (default = .)')
  parser.add_option('--force', action='store_true', dest='force',
                    default=False, help='When used with --produce_manifest, '
                    'it will overwrite an existing manifest file '
                    '(default = False)')
  parser.add_option('--ignore_missing_failures', action='store_true',
                    dest='ignore_missing_failures', default=False,
                    help='When a failure is expected in the manifest but '
                    'it is not found in the actual results, the script '
                    'produces a note alerting to this fact. This means '
                    'that the expected failure has been fixed, or '
                    'it did not run, or it may simply be flaky. This option '
                    'suppresses that note (default = False)')
  parser.add_option('--manifest', action='store', type='string',
                    dest='manifest', default=None,
                    help='Name of the manifest file to use (default = '
                    'taken from contrib/testsuite-management/<target>.xfail)')
  parser.add_option('--produce_manifest', action='store_true',
                    dest='produce_manifest', default=False,
                    help='Produce the manifest for the current '
                    'build (default = False)')
  parser.add_option('--results', action='store', type='string',
                    dest='results', default=None, help='Space-separated list '
                    'of .sum files with the testing results to check. The '
                    'only content needed from these files are the lines '
                    'starting with FAIL, UNRESOLVED, XPASS or ERROR (default = '
                    '.sum files collected from the build directory).')
  parser.add_option('--verbosity', action='store', dest='verbosity',
                    type='int', default=0, help='Verbosity level (default = 0)')
  (options, _) = parser.parse_args(argv[1:])

  if options.produce_manifest:
    retval = ProduceManifest(options)
  else:
    retval = CheckExpectedResults(options)

  if retval:
    return 0
  else:
    return 1


if __name__ == '__main__':
  retval = Main(sys.argv)
  sys.exit(retval)