# Script to compare testsuite failures against a list of known-to-fail
# tests.
#
# Contributed by Diego Novillo <dnovillo@google.com>
#
# Copyright (C) 2011, 2012 Free Software Foundation, Inc.
#
# This file is part of GCC.
#
# GCC is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GCC is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GCC; see the file COPYING.  If not, write to
# the Free Software Foundation, 51 Franklin Street, Fifth Floor,
# Boston, MA 02110-1301, USA.
"""This script provides a coarser XFAILing mechanism that requires no
detailed DejaGNU markings.  This is useful in a variety of scenarios:

- Development branches with many known failures waiting to be fixed.
- Release branches with known failures that are not considered
  important for the particular release criteria used in that branch.

The script must be executed from the toplevel build directory.  When
executed it will:

1- Determine the target built: TARGET
2- Determine the source directory: SRCDIR
3- Look for a failure manifest file in
   <SRCDIR>/<MANIFEST_SUBDIR>/<MANIFEST_NAME>.xfail
4- Collect all the <tool>.sum files from the build tree.
5- Produce a report stating:
   a- Failures expected in the manifest but not present in the build.
   b- Failures in the build not expected in the manifest.
6- If all the build failures are expected in the manifest, it exits
   with exit code 0.  Otherwise, it exits with error code 1.
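
For example, a typical invocation from a finished build tree might look
like this (the paths are illustrative, not part of the script):

  $ cd /path/to/gcc-build
  $ /path/to/gcc-src/contrib/testsuite-management/validate_failures.py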

Manifest files contain expected DejaGNU results that are otherwise
treated as failures.
They may also contain additional text:

# This is a comment.  - self explanatory
@include file         - the file is a path relative to the includer
@remove result text   - result text is removed from the expected set
"""

import datetime
import optparse
import os
import re
import sys
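
# For illustration only, a small manifest might look like this (the test
# names and PR reference are hypothetical):
#
#   # Fails everywhere, see PR tree-optimization/12345.
#   FAIL: gcc.dg/example-1.c (test for excess errors)
#   flaky | FAIL: gcc.dg/example-2.c execution test
#   expire=20301231 | FAIL: gcc.dg/example-3.c scan-assembler foo
#   @include common.xfail
#   @remove FAIL: gcc.dg/example-4.c (test for excess errors)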

# Handled test results.
_VALID_TEST_RESULTS = [ 'FAIL', 'UNRESOLVED', 'XPASS', 'ERROR' ]

# Subdirectory of srcdir in which to find the manifest file.
_MANIFEST_SUBDIR = 'contrib/testsuite-management'

# Pattern for naming manifest files.
# The first argument should be the toplevel GCC(/GNU tool) source directory.
# The second argument is the manifest subdir.
# The third argument is the manifest target, which defaults to the target
# triplet used during the build.
_MANIFEST_PATH_PATTERN = '%s/%s/%s.xfail'

# The options passed to the program.
_OPTIONS = None


def Error(msg):
  print >>sys.stderr, '\nerror: %s' % msg
  sys.exit(1)


class TestResult(object):
  """Describes a single DejaGNU test result as emitted in .sum files.

  We are only interested in representing unsuccessful tests.  So, only
  a subset of all the tests are loaded.

  The summary line used to build the test result should have this format:

  attrlist | XPASS: gcc.dg/unroll_1.c (test for excess errors)
  ^^^^^^^^   ^^^^^  ^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^
  optional   state  name              description
  attributes

  Attributes:
    attrlist: A comma separated list of attributes.
      Valid values are:
        flaky            Indicates that this test may not always fail.  These
                         tests are reported, but their presence does not affect
                         the results.

        expire=YYYYMMDD  After this date, this test will produce an error
                         whether it is in the manifest or not.

    state: One of UNRESOLVED, XPASS or FAIL.
    name: File name for the test.
    description: String describing the test (flags used, dejagnu message, etc)
    ordinal: Monotonically increasing integer.
             It is used to keep results for one .exp file sorted
             by the order the tests were run.
  """
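
  # A minimal sketch of how a summary line maps onto the fields above
  # (the test name is hypothetical):
  #
  #   r = TestResult('expire=20301231 | FAIL: gcc.dg/example.c execution test')
  #   # r.attrs == 'expire=20301231', r.state == 'FAIL',
  #   # r.name == 'gcc.dg/example.c', r.description == 'execution test'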

  def __init__(self, summary_line, ordinal=-1):
    try:
      self.attrs = ''
      if '|' in summary_line:
        (self.attrs, summary_line) = summary_line.split('|', 1)
      try:
        (self.state,
         self.name,
         self.description) = re.match(r' *([A-Z]+):\s*(\S+)\s+(.*)',
                                      summary_line).groups()
      except:
        print 'Failed to parse summary line: "%s"' % summary_line
        raise
      self.attrs = self.attrs.strip()
      self.state = self.state.strip()
      self.description = self.description.strip()
      self.ordinal = ordinal
    except ValueError:
      Error('Cannot parse summary line "%s"' % summary_line)

    if self.state not in _VALID_TEST_RESULTS:
      Error('Invalid test result %s in "%s" (parsed as "%s")' % (
            self.state, summary_line, self))

  def __lt__(self, other):
    return (self.name < other.name or
            (self.name == other.name and self.ordinal < other.ordinal))

  def __hash__(self):
    return hash(self.state) ^ hash(self.name) ^ hash(self.description)

  def __eq__(self, other):
    return (self.state == other.state and
            self.name == other.name and
            self.description == other.description)

  def __ne__(self, other):
    return not (self == other)

  def __str__(self):
    attrs = ''
    if self.attrs:
      attrs = '%s | ' % self.attrs
    return '%s%s: %s %s' % (attrs, self.state, self.name, self.description)

  def ExpirationDate(self):
    # Return a datetime.date object with the expiration date for this
    # test result.  Return None, if no expiration has been set.
    if re.search(r'expire=', self.attrs):
      expiration = re.search(r'expire=(\d\d\d\d)(\d\d)(\d\d)', self.attrs)
      if not expiration:
        Error('Invalid expire= format in "%s".  Must be of the form '
              '"expire=YYYYMMDD"' % self)
      return datetime.date(int(expiration.group(1)),
                           int(expiration.group(2)),
                           int(expiration.group(3)))
    return None

  def HasExpired(self):
    # Return True if the expiration date of this result has passed.
    expiration_date = self.ExpirationDate()
    if expiration_date:
      now = datetime.date.today()
      return now > expiration_date
    return False


def GetMakefileValue(makefile_name, value_name):
  """Return the (stripped) value assigned to value_name in makefile_name."""
  if os.path.exists(makefile_name):
    makefile = open(makefile_name)
    for line in makefile:
      if line.startswith(value_name):
        (_, value) = line.split('=', 1)
        value = value.strip()
        makefile.close()
        return value
    makefile.close()
  return None


def ValidBuildDirectory(builddir, target):
  """Return True if builddir looks like a toplevel build tree for target."""
  if (not os.path.exists(builddir) or
      not os.path.exists('%s/Makefile' % builddir) or
      (not os.path.exists('%s/build-%s' % (builddir, target)) and
       not os.path.exists('%s/%s' % (builddir, target)))):
    return False
  return True


def IsComment(line):
  """Return True if line is a comment."""
  return line.startswith('#')


def IsInterestingResult(line):
  """Return True if line is one of the summary lines we care about."""
  if '|' in line:
    (_, line) = line.split('|', 1)
  line = line.strip()
  return any(line.startswith(result) for result in _VALID_TEST_RESULTS)


def IsInclude(line):
  """Return True if line is an include of another file."""
  return line.startswith("@include ")


def GetIncludeFile(line, includer):
  """Extract the name of the include file from line."""
  includer_dir = os.path.dirname(includer)
  include_file = line[len("@include "):]
  return os.path.join(includer_dir, include_file.strip())


def IsNegativeResult(line):
  """Return True if line should be removed from the expected results."""
  return line.startswith("@remove ")


def GetNegativeResult(line):
  """Extract the name of the negative result from line."""
  line = line[len("@remove "):]
  return line.strip()


def ParseManifestWorker(result_set, manifest_path):
  """Read manifest_path, adding the contents to result_set."""
  if _OPTIONS.verbosity >= 1:
    print 'Parsing manifest file %s.' % manifest_path
  manifest_file = open(manifest_path)
  for line in manifest_file:
    line = line.strip()
    if line == "":
      pass
    elif IsComment(line):
      pass
    elif IsNegativeResult(line):
      result_set.remove(TestResult(GetNegativeResult(line)))
    elif IsInclude(line):
      ParseManifestWorker(result_set, GetIncludeFile(line, manifest_path))
    elif IsInterestingResult(line):
      result_set.add(TestResult(line))
    else:
      Error('Unrecognized line in manifest file: %s' % line)
  manifest_file.close()


def ParseManifest(manifest_path):
  """Create a set of TestResult instances from the given manifest file."""
  result_set = set()
  ParseManifestWorker(result_set, manifest_path)
  return result_set


def ParseSummary(sum_fname):
  """Create a set of TestResult instances from the given summary file."""
  result_set = set()
  # ordinal is used when sorting the results so that tests within each
  # .exp file are kept sorted.
  ordinal = 0
  sum_file = open(sum_fname)
  for line in sum_file:
    if IsInterestingResult(line):
      result = TestResult(line, ordinal)
      ordinal += 1
      if result.HasExpired():
        # Tests that have expired are not added to the set of expected
        # results.  If they are still present in the set of actual results,
        # they will cause an error to be reported.
        print 'WARNING: Expected failure "%s" has expired.' % line.strip()
        continue
      result_set.add(result)
  sum_file.close()
  return result_set


def GetManifest(manifest_path):
  """Build a set of expected failures from the manifest file.

  Each entry in the manifest file should have the format understood
  by the TestResult constructor.

  If no manifest file exists for this target, it returns an empty set.
  """
  if os.path.exists(manifest_path):
    return ParseManifest(manifest_path)
  else:
    return set()


def CollectSumFiles(builddir):
  """Return a list of all .sum files found under builddir."""
  sum_files = []
  for root, dirs, files in os.walk(builddir):
    for ignored in ('.svn', '.git'):
      if ignored in dirs:
        dirs.remove(ignored)
    for fname in files:
      if fname.endswith('.sum'):
        sum_files.append(os.path.join(root, fname))
  return sum_files


def GetResults(sum_files):
  """Collect all the test results from the given .sum files."""
  build_results = set()
  for sum_fname in sum_files:
    print '\t%s' % sum_fname
    build_results |= ParseSummary(sum_fname)
  return build_results


def CompareResults(manifest, actual):
  """Compare sets of results and return two lists:
     - List of results present in ACTUAL but missing from MANIFEST.
     - List of results present in MANIFEST but missing from ACTUAL.
  """
  # Collect all the actual results not present in the manifest.
  # Results in this set will be reported as errors.
  actual_vs_manifest = set()
  for actual_result in actual:
    if actual_result not in manifest:
      actual_vs_manifest.add(actual_result)

  # Collect all the tests in the manifest that were not found
  # in the actual results.
  # Results in this set will be reported as warnings (since
  # they are expected failures that are not failing anymore).
  manifest_vs_actual = set()
  for expected_result in manifest:
    # Ignore tests marked flaky.
    if 'flaky' in expected_result.attrs:
      continue
    if expected_result not in actual:
      manifest_vs_actual.add(expected_result)

  return actual_vs_manifest, manifest_vs_actual


def GetManifestPath(srcdir, target, user_provided_must_exist):
  """Return the full path to the manifest file."""
  manifest_path = _OPTIONS.manifest
  if manifest_path:
    if user_provided_must_exist and not os.path.exists(manifest_path):
      Error('Manifest does not exist: %s' % manifest_path)
    return manifest_path
  else:
    return _MANIFEST_PATH_PATTERN % (srcdir, _MANIFEST_SUBDIR, target)


def GetBuildData():
  """Return (srcdir, target) as recorded in the build-tree Makefile."""
  target = GetMakefileValue('%s/Makefile' % _OPTIONS.build_dir, 'target_alias=')
  srcdir = GetMakefileValue('%s/Makefile' % _OPTIONS.build_dir, 'srcdir =')
  if not ValidBuildDirectory(_OPTIONS.build_dir, target):
    Error('%s is not a valid GCC top level build directory.' %
          _OPTIONS.build_dir)
  print 'Source directory: %s' % srcdir
  print 'Build target: %s' % target
  return srcdir, target


def PrintSummary(msg, summary):
  print '\n\n%s' % msg
  for result in sorted(summary):
    print result


def GetSumFiles(results, build_dir):
  """Return the list of .sum files to analyze."""
  if not results:
    print 'Getting actual results from build directory %s' % build_dir
    sum_files = CollectSumFiles(build_dir)
  else:
    print 'Getting actual results from user-provided results'
    sum_files = results.split()
  return sum_files


def PerformComparison(expected, actual, ignore_missing_failures):
  """Compare the two result sets; return True if there were no new failures."""
  actual_vs_expected, expected_vs_actual = CompareResults(expected, actual)

  tests_ok = True
  if len(actual_vs_expected) > 0:
    PrintSummary('Unexpected results in this build (new failures)',
                 actual_vs_expected)
    tests_ok = False

  if not ignore_missing_failures and len(expected_vs_actual) > 0:
    PrintSummary('Expected results not present in this build (fixed tests)'
                 '\n\nNOTE: This is not a failure.  It just means that these '
                 'tests were expected\nto fail, but they worked in this '
                 'configuration.\n', expected_vs_actual)

  if tests_ok:
    print '\nSUCCESS: No unexpected failures.'

  return tests_ok


def CheckExpectedResults():
  (srcdir, target) = GetBuildData()
  manifest_path = GetManifestPath(srcdir, target, True)
  print 'Manifest: %s' % manifest_path
  manifest = GetManifest(manifest_path)
  sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
  actual = GetResults(sum_files)

  if _OPTIONS.verbosity >= 1:
    PrintSummary('Tests expected to fail', manifest)
    PrintSummary('\nActual test results', actual)

  return PerformComparison(manifest, actual, _OPTIONS.ignore_missing_failures)


def ProduceManifest():
  (srcdir, target) = GetBuildData()
  manifest_path = GetManifestPath(srcdir, target, False)
  print 'Manifest: %s' % manifest_path
  if os.path.exists(manifest_path) and not _OPTIONS.force:
    Error('Manifest file %s already exists.\nUse --force to overwrite.' %
          manifest_path)

  sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
  actual = GetResults(sum_files)
  manifest_file = open(manifest_path, 'w')
  for result in sorted(actual):
    print result
    manifest_file.write('%s\n' % result)
  manifest_file.close()

  return True


def CompareBuilds():
  (srcdir, target) = GetBuildData()

  sum_files = GetSumFiles(_OPTIONS.results, _OPTIONS.build_dir)
  actual = GetResults(sum_files)

  # The clean build's results always come from its own build directory;
  # a user-provided --results list describes only the build under test.
  clean_sum_files = GetSumFiles(None, _OPTIONS.clean_build)
  clean = GetResults(clean_sum_files)

  return PerformComparison(clean, actual, _OPTIONS.ignore_missing_failures)


def Main(argv):
  parser = optparse.OptionParser(usage=__doc__)

  # Keep the following list sorted by option name.
  parser.add_option('--build_dir', action='store', type='string',
                    dest='build_dir', default='.',
                    help='Build directory to check (default = .)')
  parser.add_option('--clean_build', action='store', type='string',
                    dest='clean_build', default=None,
                    help='Compare test results from this build against '
                    'those of another (clean) build. Use this option '
                    'when comparing the test results of your patch versus '
                    'the test results of a clean build without your patch. '
                    'You must provide the path to the top directory of your '
                    'clean build.')
  parser.add_option('--force', action='store_true', dest='force',
                    default=False, help='When used with --produce_manifest, '
                    'it will overwrite an existing manifest file '
                    '(default = False)')
  parser.add_option('--ignore_missing_failures', action='store_true',
                    dest='ignore_missing_failures', default=False,
                    help='When a failure is expected in the manifest but '
                    'it is not found in the actual results, the script '
                    'produces a note alerting to this fact. This means '
                    'that the expected failure has been fixed, or '
                    'it did not run, or it may simply be flaky '
                    '(default = False)')
  parser.add_option('--manifest', action='store', type='string',
                    dest='manifest', default=None,
                    help='Name of the manifest file to use (default = '
                    'taken from contrib/testsuite-management/<target_alias>.xfail)')
  parser.add_option('--produce_manifest', action='store_true',
                    dest='produce_manifest', default=False,
                    help='Produce the manifest for the current '
                    'build (default = False)')
  parser.add_option('--results', action='store', type='string',
                    dest='results', default=None, help='Space-separated list '
                    'of .sum files with the testing results to check. The '
                    'only content needed from these files are the lines '
                    'starting with FAIL, XPASS or UNRESOLVED (default = '
                    '.sum files collected from the build directory).')
  parser.add_option('--verbosity', action='store', dest='verbosity',
                    type='int', default=0, help='Verbosity level (default = 0)')
  global _OPTIONS
  (_OPTIONS, _) = parser.parse_args(argv[1:])

  if _OPTIONS.produce_manifest:
    retval = ProduceManifest()
  elif _OPTIONS.clean_build:
    retval = CompareBuilds()
  else:
    retval = CheckExpectedResults()

  if retval:
    return 0
  else:
    return 1


if __name__ == '__main__':
  retval = Main(sys.argv)
  sys.exit(retval)