#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Implements a simple "negative compile" test for C++ on linux.

Sometimes a C++ API needs to ensure that various usages cannot compile. To
enable unittesting of these assertions, we use this python script to
invoke gcc on a source file and assert that compilation fails.

For more info, see:
  http://dev.chromium.org/developers/testing/no-compile-tests
"""

import ast
import os
import re
import select
import shlex
import subprocess
import sys
import time


# Matches lines that start with #if and have the substring TEST in the
# conditional. Also extracts the comment. This allows us to search for
# lines like the following:
#
#   #ifdef NCTEST_NAME_OF_TEST  // [r'expected output']
#   #if defined(NCTEST_NAME_OF_TEST)  // [r'expected output']
#   #if NCTEST_NAME_OF_TEST  // [r'expected output']
#   #elif NCTEST_NAME_OF_TEST  // [r'expected output']
#   #elif DISABLED_NCTEST_NAME_OF_TEST  // [r'expected output']
#
# inside the unittest file.
NCTEST_CONFIG_RE = re.compile(r'^#(?:el)?if.*\s+(\S*NCTEST\S*)\s*(//.*)?')
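
# For example, the regex above pulls the macro name and the trailing
# expectation comment out of a test line:
#
#   >>> NCTEST_CONFIG_RE.match("#ifdef NCTEST_FOO  // [r'invalid']").groups()
#   ('NCTEST_FOO', "// [r'invalid']")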


# Matches and removes the defined() preprocessor predicate. This is useful
# for test cases that use the preprocessor if-statement form:
#
#   #if defined(NCTEST_NAME_OF_TEST)
#
# Should be used to post-process the results found by NCTEST_CONFIG_RE.
STRIP_DEFINED_RE = re.compile(r'defined\((.*)\)')
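
# For example, applied to a name captured by NCTEST_CONFIG_RE:
#
#   >>> STRIP_DEFINED_RE.match('defined(NCTEST_FOO)').group(1)
#   'NCTEST_FOO'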


# Used to grab the expectation from the comment at the end of an #ifdef. See
# NCTEST_CONFIG_RE's comment for examples of what the format should look like.
#
# The extracted substring should be a python array of regular expressions.
EXTRACT_EXPECTATION_RE = re.compile(r'//\s*(\[.*\])')
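
# For example:
#
#   >>> EXTRACT_EXPECTATION_RE.match("// [r'foo', r'bar']").group(1)
#   "[r'foo', r'bar']"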


# The header for the result file so that it can be compiled.
RESULT_FILE_HEADER = """
// This file is generated by the no compile test from:
//   %s

#include "base/logging.h"
#include "testing/gtest/include/gtest/gtest.h"

"""


# The GUnit test function to output on a successful test completion.
SUCCESS_GUNIT_TEMPLATE = """
TEST(%s, %s) {
  LOG(INFO) << "Took %f secs. Started at %f, ended at %f";
}
"""

# The GUnit test function to output for a disabled test.
DISABLED_GUNIT_TEMPLATE = """
TEST(%s, %s) { }
"""


# Timeouts, in seconds: when to politely terminate a compile, when to
# hard-kill it, and when to assume the whole poll loop is wedged.
NCTEST_TERMINATE_TIMEOUT_SEC = 60
NCTEST_KILL_TIMEOUT_SEC = NCTEST_TERMINATE_TIMEOUT_SEC + 2
BUSY_LOOP_MAX_TIME_SEC = NCTEST_KILL_TIMEOUT_SEC * 2


def ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path):
  """Make sure the arguments being passed in are sane."""
  assert parallelism >= 1
  assert type(sourcefile_path) is str
  assert type(cflags) is str
  assert type(resultfile_path) is str


def ParseExpectation(expectation_string):
  """Extracts the expectation definition from the trailing comment on the
  ifdef.

  See the comment on NCTEST_CONFIG_RE for examples of the format we are
  parsing.

  Args:
    expectation_string: A string like "// [r'some_regex']"

  Returns:
    A list of compiled regular expressions indicating all possible valid
    compiler outputs. If the list is empty, all outputs are considered valid.
  """
  assert expectation_string is not None

  match = EXTRACT_EXPECTATION_RE.match(expectation_string)
  assert match

  raw_expectation = ast.literal_eval(match.group(1))
  assert type(raw_expectation) is list

  expectation = []
  for regex_str in raw_expectation:
    assert type(regex_str) is str
    expectation.append(re.compile(regex_str))
  return expectation
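
# For example, ParseExpectation("// [r'invalid conversion']") yields
# [re.compile(r'invalid conversion')], while ParseExpectation('// []') yields
# an empty list, which matches any compiler output.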


def ExtractTestConfigs(sourcefile_path):
  """Parses the source file for test configurations.

  Each no-compile test in the file is separated by an ifdef macro. We scan
  the source file with the NCTEST_CONFIG_RE to find all ifdefs that look like
  they demarcate one no-compile test and try to extract the test configuration
  from that.

  Args:
    sourcefile_path: The path to the source file.

  Returns:
    A list of test configurations. Each test configuration is a dictionary of
    the form:

      { name: 'NCTEST_NAME'
        suite_name: 'SOURCE_FILE_NAME'
        expectations: [re.Pattern, re.Pattern] }

    The |suite_name| is used to generate a pretty gtest output on successful
    completion of the no-compile test.

    The compiled regexps in |expectations| define the valid outputs of the
    compiler. If any one of the listed patterns matches either the stderr or
    stdout from the compilation, and the compilation failed, then the test is
    considered to have succeeded. If the list is empty, then we ignore the
    compiler output and just check for failed compilation. If |expectations|
    is actually None, then this specifies a compiler sanity check test, which
    should expect a SUCCESSFUL compilation.
  """
  sourcefile = open(sourcefile_path, 'r')

  # Convert filename from underscores to CamelCase.
  words = os.path.splitext(os.path.basename(sourcefile_path))[0].split('_')
  words = [w.capitalize() for w in words]
  suite_name = 'NoCompile' + ''.join(words)
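
  # For example, a source file named 'bind_unittest.nc' yields the suite
  # name 'NoCompileBindUnittest'.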

  # Start with at least the compiler sanity test. You need to always have one
  # sanity test to show that compiler flags and configuration are not just
  # wrong. Otherwise, having a misconfigured compiler, or an error in the
  # shared portions of the .nc file, would cause all tests to erroneously pass.
  test_configs = [{'name': 'NCTEST_SANITY',
                   'suite_name': suite_name,
                   'expectations': None}]

  for line in sourcefile:
    match_result = NCTEST_CONFIG_RE.match(line)
    if not match_result:
      continue

    groups = match_result.groups()

    # Grab the name and remove the defined() predicate if there is one.
    name = groups[0]
    strip_result = STRIP_DEFINED_RE.match(name)
    if strip_result:
      name = strip_result.group(1)

    # Read expectations if there are any.
    test_configs.append({'name': name,
                         'suite_name': suite_name,
                         'expectations': ParseExpectation(groups[1])})

  sourcefile.close()
  return test_configs


def StartTest(sourcefile_path, cflags, config):
  """Starts one negative compile test.

  Args:
    sourcefile_path: The path to the source file.
    cflags: A string with all the CFLAGS to give to gcc. This string will be
            split by shlex, so be careful with escaping.
    config: A dictionary describing the test. See ExtractTestConfigs
            for a description of the config format.

  Returns:
    A dictionary containing all the information about the started test. The
    fields in the dictionary are as follows:
      { 'proc': A subprocess object representing the compiler run.
        'cmdline': The executed command line.
        'name': The name of the test.
        'suite_name': The suite name to use when generating the gunit test
                      result.
        'terminate_timeout': The timestamp in seconds since the epoch after
                             which the test should be terminated.
        'kill_timeout': The timestamp in seconds since the epoch after which
                        the test should be given a hard kill signal.
        'started_at': A timestamp in seconds since the epoch for when this
                      test was started.
        'aborted_at': A timestamp in seconds since the epoch for when this
                      test was aborted. If the test completed successfully,
                      this value is 0.
        'finished_at': A timestamp in seconds since the epoch for when this
                       test successfully completed. If the test was aborted
                       or is still running, this value is 0.
        'expectations': A list of the test expectations. See
                        ParseExpectation() for the structure.
        }
  """
  # TODO(ajwong): Get the compiler from gyp.
  cmdline = [os.path.join(os.path.dirname(os.path.realpath(__file__)),
                          '../third_party/llvm-build/Release+Asserts/bin',
                          'clang++')]
  cmdline.extend(shlex.split(cflags))
  name = config['name']
  expectations = config['expectations']
  if expectations is not None:
    cmdline.append('-D%s' % name)
  cmdline.extend(['-std=c++11', '-o', '/dev/null', '-c', '-x', 'c++',
                  sourcefile_path])

  process = subprocess.Popen(cmdline, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
  now = time.time()
  return {'proc': process,
          'cmdline': ' '.join(cmdline),
          'name': name,
          'suite_name': config['suite_name'],
          'terminate_timeout': now + NCTEST_TERMINATE_TIMEOUT_SEC,
          'kill_timeout': now + NCTEST_KILL_TIMEOUT_SEC,
          'started_at': now,
          'aborted_at': 0,
          'finished_at': 0,
          'expectations': expectations}
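
# As a rough illustration, for a test named NCTEST_FOO the command line
# built above resembles:
#   .../llvm-build/Release+Asserts/bin/clang++ <cflags> -DNCTEST_FOO \
#       -std=c++11 -o /dev/null -c -x c++ path/to/tests.nc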


def PassTest(resultfile, test):
  """Logs the result of a test started by StartTest(), or a disabled test
  configuration.

  Args:
    resultfile: File object for .cc file that results are written to.
    test: An instance of the dictionary returned by StartTest(), or a
          configuration from ExtractTestConfigs().
  """
  # The 'started_at' key is only added if a test has been started.
  if 'started_at' in test:
    resultfile.write(SUCCESS_GUNIT_TEMPLATE % (
        test['suite_name'], test['name'],
        test['finished_at'] - test['started_at'],
        test['started_at'], test['finished_at']))
  else:
    resultfile.write(DISABLED_GUNIT_TEMPLATE % (
        test['suite_name'], test['name']))


def FailTest(resultfile, test, error, stdout=None, stderr=None):
  """Logs the result of a failed test started by StartTest().

  Args:
    resultfile: File object for .cc file that results are written to.
    test: An instance of the dictionary returned by StartTest().
    error: The printable reason for the failure.
    stdout: The test's output to stdout.
    stderr: The test's output to stderr.
  """
  resultfile.write('#error "%s Failed: %s"\n' % (test['name'], error))
  resultfile.write('#error "compile line: %s"\n' % test['cmdline'])
  if stdout and len(stdout) != 0:
    resultfile.write('#error "%s stdout:"\n' % test['name'])
    for line in stdout.split('\n'):
      resultfile.write('#error "  %s"\n' % line)

  if stderr and len(stderr) != 0:
    resultfile.write('#error "%s stderr:"\n' % test['name'])
    for line in stderr.split('\n'):
      resultfile.write('#error "  %s"\n' % line)
  resultfile.write('\n')
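
# FailTest() deliberately writes #error directives so that compiling the
# generated result file surfaces the failure in the build, e.g.:
#   #error "NCTEST_FOO Failed: Unexpected successful compilation."
#   #error "compile line: .../clang++ ..."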


def WriteStats(resultfile, suite_name, timings):
  """Logs the performance timings for each stage of the script into a fake
  test.

  Args:
    resultfile: File object for .cc file that results are written to.
    suite_name: The name of the GUnit suite this test belongs to.
    timings: Dictionary with timestamps for each stage of the script run.
  """
  stats_template = ("Started %f, Ended %f, Total %fs, Extract %fs, "
                    "Compile %fs, Process %fs")
  total_secs = timings['results_processed'] - timings['started']
  extract_secs = timings['extract_done'] - timings['started']
  compile_secs = timings['compile_done'] - timings['extract_done']
  process_secs = timings['results_processed'] - timings['compile_done']
  resultfile.write('TEST(%s, Stats) { LOG(INFO) << "%s"; }\n' % (
      suite_name, stats_template % (
          timings['started'], timings['results_processed'], total_secs,
          extract_secs, compile_secs, process_secs)))
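
# The emitted line looks like:
#   TEST(NoCompileFoo, Stats) { LOG(INFO) << "Started ..., Ended ..."; }
# giving the script's own timings a slot in the gtest output.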


def ProcessTestResult(resultfile, test):
  """Interprets and logs the result of a test started by StartTest().

  Args:
    resultfile: File object for .cc file that results are written to.
    test: The dictionary from StartTest() to process.
  """
  # Snap a copy of stdout and stderr out of the Popen object immediately,
  # because we can only call communicate() once, and lots of stuff below
  # will want access to the output.
  proc = test['proc']
  (stdout, stderr) = proc.communicate()

  if test['aborted_at'] != 0:
    FailTest(resultfile, test, "Compile timed out. Started %f ended %f." %
             (test['started_at'], test['aborted_at']))
    return

  if test['expectations'] is None:
    # This signals a compiler sanity check test. Fail iff compilation failed.
    if proc.poll() == 0:
      PassTest(resultfile, test)
    else:
      FailTest(resultfile, test, 'Sanity compile failed. Is compiler borked?',
               stdout, stderr)
  elif proc.poll() == 0:
    # Handle failure due to successful compile.
    FailTest(resultfile, test,
             'Unexpected successful compilation.',
             stdout, stderr)
  else:
    # Check the output has the right expectations. If there are no
    # expectations, then we just consider the output "matched" by default.
    if len(test['expectations']) == 0:
      PassTest(resultfile, test)
      return

    # Otherwise test against all expectations.
    for regexp in test['expectations']:
      if (regexp.search(stdout) is not None or
          regexp.search(stderr) is not None):
        PassTest(resultfile, test)
        return

    # None of the expectation patterns matched; report them all.
    expectation_str = ', '.join(
        ["r'%s'" % regexp.pattern for regexp in test['expectations']])
    FailTest(resultfile, test,
             'Expectations [%s] did not match output.' % expectation_str,
             stdout, stderr)


def CompleteAtLeastOneTest(resultfile, executing_tests):
  """Blocks until at least one task is removed from executing_tests.

  This function removes completed tests from executing_tests, logging failures
  and output. If no tests can be removed, it will enter a poll-loop until one
  test finishes or times out. On a timeout, this function is responsible for
  terminating the process in the appropriate fashion.

  Args:
    resultfile: File object for .cc file that results are written to.
    executing_tests: A dict mapping a string containing the test name to the
                     test dict returned from StartTest().

  Returns:
    A list of tests that have finished.
  """
  finished_tests = []
  busy_loop_timeout = time.time() + BUSY_LOOP_MAX_TIME_SEC
  while len(finished_tests) == 0:
    # If we don't make progress for too long, assume the code is just dead.
    assert busy_loop_timeout > time.time()

    # Select on the output pipes.
    read_set = []
    for test in executing_tests.values():
      read_set.extend([test['proc'].stderr, test['proc'].stdout])
    result = select.select(read_set, [], read_set, NCTEST_TERMINATE_TIMEOUT_SEC)

    # Now attempt to process results.
    now = time.time()
    for test in executing_tests.values():
      proc = test['proc']
      if proc.poll() is not None:
        test['finished_at'] = now
        finished_tests.append(test)
      elif test['terminate_timeout'] < now:
        proc.terminate()
        test['aborted_at'] = now
      elif test['kill_timeout'] < now:
        proc.kill()
        test['aborted_at'] = now

  for test in finished_tests:
    del executing_tests[test['name']]
  return finished_tests


def main():
  if len(sys.argv) != 5:
    print('Usage: %s <parallelism> <sourcefile> <cflags> <resultfile>' %
          sys.argv[0])
    sys.exit(1)

  # Force us into the "C" locale so the compiler doesn't localize its output.
  # In particular, this stops gcc from using smart quotes when in english UTF-8
  # locales. This makes the expectation writing much easier.
  os.environ['LC_ALL'] = 'C'

  parallelism = int(sys.argv[1])
  sourcefile_path = sys.argv[2]
  cflags = sys.argv[3]
  resultfile_path = sys.argv[4]

  timings = {'started': time.time()}

  ValidateInput(parallelism, sourcefile_path, cflags, resultfile_path)

  test_configs = ExtractTestConfigs(sourcefile_path)
  timings['extract_done'] = time.time()

  resultfile = open(resultfile_path, 'w')
  resultfile.write(RESULT_FILE_HEADER % sourcefile_path)

  # Run the no-compile tests, but ensure we do not run more than |parallelism|
  # tests at once.
  timings['header_written'] = time.time()
  executing_tests = {}
  finished_tests = []
  for config in test_configs:
    # CompleteAtLeastOneTest blocks until at least one test finishes. Thus,
    # this acts as a semaphore. We cannot use threads + a real semaphore
    # because subprocess forks, which can cause all sorts of hilarity with
    # threads.
    if len(executing_tests) >= parallelism:
      finished_tests.extend(CompleteAtLeastOneTest(resultfile, executing_tests))

    if config['name'].startswith('DISABLED_'):
      PassTest(resultfile, config)
    else:
      test = StartTest(sourcefile_path, cflags, config)
      assert test['name'] not in executing_tests
      executing_tests[test['name']] = test

  # If there are no more tests to start, we still need to drain the running
  # ones.
  while len(executing_tests) > 0:
    finished_tests.extend(CompleteAtLeastOneTest(resultfile, executing_tests))
  timings['compile_done'] = time.time()

  for test in finished_tests:
    ProcessTestResult(resultfile, test)
  timings['results_processed'] = time.time()

  # We always know at least a sanity test was run.
  WriteStats(resultfile, finished_tests[0]['suite_name'], timings)

  resultfile.close()


if __name__ == '__main__':
  main()