Add a new option --clean_build to validate_failures.py
contrib/testsuite-management/validate_failures.py
#!/usr/bin/python

# Script to compare testsuite failures against a list of known-to-fail
# tests.

# Contributed by Diego Novillo <dnovillo@google.com>

# Copyright (C) 2011, 2012 Free Software Foundation, Inc.

# This file is part of GCC.

# GCC is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.

# GCC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING.  If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.

27 """This script provides a coarser XFAILing mechanism that requires no
28 detailed DejaGNU markings. This is useful in a variety of scenarios:
30 - Development branches with many known failures waiting to be fixed.
31 - Release branches with known failures that are not considered
32 important for the particular release criteria used in that branch.
34 The script must be executed from the toplevel build directory. When
35 executed it will:
37 1- Determine the target built: TARGET
38 2- Determine the source directory: SRCDIR
39 3- Look for a failure manifest file in
40 <SRCDIR>/contrib/testsuite-management/<TARGET>.xfail
41 4- Collect all the <tool>.sum files from the build tree.
42 5- Produce a report stating:
43 a- Failures expected in the manifest but not present in the build.
44 b- Failures in the build not expected in the manifest.
45 6- If all the build failures are expected in the manifest, it exits
46 with exit code 0. Otherwise, it exits with error code 1.
47 """
import datetime
import optparse
import os
import re
import sys

# Handled test results.
_VALID_TEST_RESULTS = [ 'FAIL', 'UNRESOLVED', 'XPASS', 'ERROR' ]

# Pattern for naming manifest files.  The first argument should be
# the toplevel GCC source directory.  The second argument is the
# target triple used during the build.
_MANIFEST_PATH_PATTERN = '%s/contrib/testsuite-management/%s.xfail'

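# For instance, a source tree in /src/gcc built for target
# x86_64-unknown-linux-gnu would resolve to the manifest file
# /src/gcc/contrib/testsuite-management/x86_64-unknown-linux-gnu.xfail
# (paths and triple are illustrative).
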
def Error(msg):
  print >>sys.stderr, '\nerror: %s' % msg
  sys.exit(1)


class TestResult(object):
  """Describes a single DejaGNU test result as emitted in .sum files.

  We are only interested in representing unsuccessful tests.  So, only
  a subset of all the tests are loaded.

  The summary line used to build the test result should have this format:

  attrlist | XPASS: gcc.dg/unroll_1.c (test for excess errors)
  ^^^^^^^^   ^^^^^  ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
  optional   state  name              description
  attributes

  Attributes:
    attrlist: A comma separated list of attributes.
      Valid values:
        flaky            Indicates that this test may not always fail.  These
                         tests are reported, but their presence does not affect
                         the results.

        expire=YYYYMMDD  After this date, this test will produce an error
                         whether it is in the manifest or not.

    state: One of FAIL, UNRESOLVED, XPASS or ERROR.
    name: File name for the test.
    description: String describing the test (flags used, dejagnu message, etc)
  """

  def __init__(self, summary_line):
    try:
      self.attrs = ''
      if '|' in summary_line:
        (self.attrs, summary_line) = summary_line.split('|', 1)
      try:
        (self.state,
         self.name,
         self.description) = re.match(r' *([A-Z]+):\s*(\S+)\s+(.*)',
                                      summary_line).groups()
      except:
        print 'Failed to parse summary line: "%s"' % summary_line
        raise
      self.attrs = self.attrs.strip()
      self.state = self.state.strip()
      self.description = self.description.strip()
    except ValueError:
      Error('Cannot parse summary line "%s"' % summary_line)

    if self.state not in _VALID_TEST_RESULTS:
      Error('Invalid test result %s in "%s" (parsed as "%s")' % (
            self.state, summary_line, self))

  def __lt__(self, other):
    return self.name < other.name

  def __hash__(self):
    return hash(self.state) ^ hash(self.name) ^ hash(self.description)

  def __eq__(self, other):
    return (self.state == other.state and
            self.name == other.name and
            self.description == other.description)

  def __ne__(self, other):
    return not (self == other)

  def __str__(self):
    attrs = ''
    if self.attrs:
      attrs = '%s | ' % self.attrs
    return '%s%s: %s %s' % (attrs, self.state, self.name, self.description)

  def ExpirationDate(self):
    # Return a datetime.date object with the expiration date for this
    # test result.  Return None, if no expiration has been set.
    if re.search(r'expire=', self.attrs):
      expiration = re.search(r'expire=(\d\d\d\d)(\d\d)(\d\d)', self.attrs)
      if not expiration:
        Error('Invalid expire= format in "%s".  Must be of the form '
              '"expire=YYYYMMDD"' % self)
      return datetime.date(int(expiration.group(1)),
                           int(expiration.group(2)),
                           int(expiration.group(3)))
    return None

  def HasExpired(self):
    # Return True if the expiration date of this result has passed.
    expiration_date = self.ExpirationDate()
    if expiration_date:
      now = datetime.date.today()
      return now > expiration_date


def GetMakefileValue(makefile_name, value_name):
  if os.path.exists(makefile_name):
    makefile = open(makefile_name)
    for line in makefile:
      if line.startswith(value_name):
        (_, value) = line.split('=', 1)
        value = value.strip()
        makefile.close()
        return value
    makefile.close()
  return None

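# The Makefile lines this matches look like the following (the values
# shown are hypothetical; the value names are the ones GetBuildData
# actually passes in):
#   target_alias=x86_64-unknown-linux-gnu
#   srcdir = /src/gcc
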
def ValidBuildDirectory(builddir, target):
  if (not os.path.exists(builddir) or
      not os.path.exists('%s/Makefile' % builddir) or
      (not os.path.exists('%s/build-%s' % (builddir, target)) and
       not os.path.exists('%s/%s' % (builddir, target)))):
    return False
  return True


def IsInterestingResult(line):
  """Return True if the given line is one of the summary lines we care about."""
  line = line.strip()
  if line.startswith('#'):
    return False
  if '|' in line:
    (_, line) = line.split('|', 1)
    line = line.strip()
  for result in _VALID_TEST_RESULTS:
    if line.startswith(result):
      return True
  return False

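# For example, 'FAIL: gcc.dg/unroll_1.c (test for excess errors)' and
# 'flaky | FAIL: ...' are interesting; 'PASS:' lines and manifest
# comments starting with '#' are not.
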
def ParseSummary(sum_fname):
  """Create a set of TestResult instances from the given summary file."""
  result_set = set()
  sum_file = open(sum_fname)
  for line in sum_file:
    if IsInterestingResult(line):
      result = TestResult(line)
      if result.HasExpired():
        # Tests that have expired are not added to the set of expected
        # results.  If they are still present in the set of actual results,
        # they will cause an error to be reported.
        print 'WARNING: Expected failure "%s" has expired.' % line.strip()
        continue
      result_set.add(result)
  sum_file.close()
  return result_set


def GetManifest(manifest_name):
  """Build a set of expected failures from the manifest file.

  Each entry in the manifest file should have the format understood
  by the TestResult constructor.

  If no manifest file exists for this target, it returns an empty
  set.
  """
  if os.path.exists(manifest_name):
    return ParseSummary(manifest_name)
  else:
    return set()


def CollectSumFiles(builddir):
  sum_files = []
  for root, dirs, files in os.walk(builddir):
    if '.svn' in dirs:
      dirs.remove('.svn')
    for fname in files:
      if fname.endswith('.sum'):
        sum_files.append(os.path.join(root, fname))
  return sum_files


def GetResults(sum_files):
  """Collect all the test results from the given .sum files."""
  build_results = set()
  for sum_fname in sum_files:
    print '\t%s' % sum_fname
    build_results |= ParseSummary(sum_fname)
  return build_results


def CompareResults(manifest, actual):
  """Compare sets of results and return two lists:
     - List of results present in ACTUAL but missing from MANIFEST.
     - List of results present in MANIFEST but missing from ACTUAL.
  """
  # Collect all the actual results not present in the manifest.
  # Results in this set will be reported as errors.
  actual_vs_manifest = set()
  for actual_result in actual:
    if actual_result not in manifest:
      actual_vs_manifest.add(actual_result)

  # Collect all the tests in the manifest that were not found
  # in the actual results.
  # Results in this set will be reported as warnings (since
  # they are expected failures that are not failing anymore).
  manifest_vs_actual = set()
  for expected_result in manifest:
    # Ignore tests marked flaky.
    if 'flaky' in expected_result.attrs:
      continue
    if expected_result not in actual:
      manifest_vs_actual.add(expected_result)

  return actual_vs_manifest, manifest_vs_actual


def GetBuildData(options):
  target = GetMakefileValue('%s/Makefile' % options.build_dir, 'target_alias=')
  srcdir = GetMakefileValue('%s/Makefile' % options.build_dir, 'srcdir =')
  if not ValidBuildDirectory(options.build_dir, target):
    Error('%s is not a valid GCC top level build directory.' %
          options.build_dir)
  print 'Source directory: %s' % srcdir
  print 'Build target: %s' % target
  return srcdir, target, True


def PrintSummary(msg, summary):
  print '\n\n%s' % msg
  for result in sorted(summary):
    print result


def GetSumFiles(results, build_dir):
  if not results:
    print 'Getting actual results from build directory %s' % build_dir
    sum_files = CollectSumFiles(build_dir)
  else:
    print 'Getting actual results from user-provided results'
    sum_files = results.split()
  return sum_files


def PerformComparison(expected, actual, ignore_missing_failures):
  actual_vs_expected, expected_vs_actual = CompareResults(expected, actual)

  tests_ok = True
  if len(actual_vs_expected) > 0:
    PrintSummary('Unexpected results in this build (new failures)',
                 actual_vs_expected)
    tests_ok = False

  if not ignore_missing_failures and len(expected_vs_actual) > 0:
    PrintSummary('Expected results not present in this build (fixed tests)'
                 '\n\nNOTE: This is not a failure.  It just means that these '
                 'tests were expected\nto fail, but they worked in this '
                 'configuration.\n', expected_vs_actual)

  if tests_ok:
    print '\nSUCCESS: No unexpected failures.'

  return tests_ok


def CheckExpectedResults(options):
  if not options.manifest:
    (srcdir, target, valid_build) = GetBuildData(options)
    if not valid_build:
      return False
    manifest_name = _MANIFEST_PATH_PATTERN % (srcdir, target)
  else:
    manifest_name = options.manifest
    if not os.path.exists(manifest_name):
      Error('Manifest file %s does not exist.' % manifest_name)

  print 'Manifest: %s' % manifest_name
  manifest = GetManifest(manifest_name)
  sum_files = GetSumFiles(options.results, options.build_dir)
  actual = GetResults(sum_files)

  if options.verbosity >= 1:
    PrintSummary('Tests expected to fail', manifest)
    PrintSummary('\nActual test results', actual)

  return PerformComparison(manifest, actual, options.ignore_missing_failures)


def ProduceManifest(options):
  (srcdir, target, valid_build) = GetBuildData(options)
  if not valid_build:
    return False

  manifest_name = _MANIFEST_PATH_PATTERN % (srcdir, target)
  if os.path.exists(manifest_name) and not options.force:
    Error('Manifest file %s already exists.\nUse --force to overwrite.' %
          manifest_name)

  sum_files = GetSumFiles(options.results, options.build_dir)
  actual = GetResults(sum_files)
  manifest_file = open(manifest_name, 'w')
  for result in sorted(actual):
    print result
    manifest_file.write('%s\n' % result)
  manifest_file.close()

  return True

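# A sketch of generating a manifest from the current build (run from the
# toplevel build directory; the source path is hypothetical, and --force
# is only needed when a manifest for this target already exists):
#   $ python /src/gcc/contrib/testsuite-management/validate_failures.py \
#       --produce_manifest
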
def CompareBuilds(options):
  (srcdir, target, valid_build) = GetBuildData(options)
  if not valid_build:
    return False

  sum_files = GetSumFiles(options.results, options.build_dir)
  actual = GetResults(sum_files)

  clean_sum_files = GetSumFiles(None, options.clean_build)
  clean = GetResults(clean_sum_files)

  return PerformComparison(clean, actual, options.ignore_missing_failures)

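# A sketch of the comparison enabled by --clean_build: failures from the
# clean build become the expected set, and failures from the patched
# build the actual set (directory names are hypothetical):
#   $ cd /build/gcc-patched
#   $ python /src/gcc/contrib/testsuite-management/validate_failures.py \
#       --clean_build=/build/gcc-clean
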
def Main(argv):
  parser = optparse.OptionParser(usage=__doc__)

  # Keep the following list sorted by option name.
  parser.add_option('--build_dir', action='store', type='string',
                    dest='build_dir', default='.',
                    help='Build directory to check (default = .)')
  parser.add_option('--clean_build', action='store', type='string',
                    dest='clean_build', default=None,
                    help='Compare test results from this build against '
                    'those of another (clean) build.  Use this option '
                    'when comparing the test results of your patch versus '
                    'the test results of a clean build without your patch.  '
                    'You must provide the path to the top directory of your '
                    'clean build.')
  parser.add_option('--force', action='store_true', dest='force',
                    default=False, help='When used with --produce_manifest, '
                    'it will overwrite an existing manifest file '
                    '(default = False)')
  parser.add_option('--ignore_missing_failures', action='store_true',
                    dest='ignore_missing_failures', default=False,
                    help='When a failure is expected in the manifest but '
                    'it is not found in the actual results, the script '
                    'produces a note alerting to this fact.  This means '
                    'that the expected failure has been fixed, or '
                    'it did not run, or it may simply be flaky '
                    '(default = False)')
  parser.add_option('--manifest', action='store', type='string',
                    dest='manifest', default=None,
                    help='Name of the manifest file to use (default = '
                    'taken from contrib/testsuite-management/<target>.xfail)')
  parser.add_option('--produce_manifest', action='store_true',
                    dest='produce_manifest', default=False,
                    help='Produce the manifest for the current '
                    'build (default = False)')
  parser.add_option('--results', action='store', type='string',
                    dest='results', default=None, help='Space-separated list '
                    'of .sum files with the testing results to check.  The '
                    'only content needed from these files are the lines '
                    'starting with FAIL, UNRESOLVED, XPASS or ERROR '
                    '(default = .sum files collected from the build '
                    'directory).')
  parser.add_option('--verbosity', action='store', dest='verbosity',
                    type='int', default=0, help='Verbosity level (default = 0)')
  (options, _) = parser.parse_args(argv[1:])

  if options.produce_manifest:
    retval = ProduceManifest(options)
  elif options.clean_build:
    retval = CompareBuilds(options)
  else:
    retval = CheckExpectedResults(options)

  if retval:
    return 0
  else:
    return 1

if __name__ == '__main__':
  retval = Main(sys.argv)
  sys.exit(retval)