# Carlos Rafael Giani, 2006
# Thomas Nagy, 2010-2018 (ita)

"""
Unit testing system for C/C++/D and interpreted languages providing test execution:

* in parallel, by using ``waf -j``
* partial (only the tests that have changed) or full (by using ``waf --alltests``)

The tests are declared by adding the **test** feature to programs::

	def options(opt):
		opt.load('compiler_cxx waf_unit_test')
	def configure(conf):
		conf.load('compiler_cxx waf_unit_test')
	def build(bld):
		bld(features='cxx cxxprogram test', source='main.cpp', target='app')
		# or
		bld.program(features='test', source='main2.cpp', target='app2')

When the build is executed, the program 'test' will be built and executed without arguments.
Success or failure is detected by looking at the return code. The status and the standard
output/error are stored on the build context.

The results can be displayed by registering a callback function. Here is how to call
the predefined callback::

	def build(bld):
		bld(features='cxx cxxprogram test', source='main.c', target='app')
		from waflib.Tools import waf_unit_test
		bld.add_post_fun(waf_unit_test.summary)
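
A custom callback can also inspect the stored results directly: each entry of
``bld.utest_results`` is a ``(filename, exit_code, stdout, stderr)`` tuple.
A minimal sketch (the ``check_results`` name is only illustrative)::

	def build(bld):
		def check_results(bld):
			for (f, code, out, err) in getattr(bld, 'utest_results', []):
				if code:
					print('%s failed with exit code %d' % (f, code))
		bld.add_post_fun(check_results)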

Passing ``--dump-test-scripts`` makes the build write corresponding Python files
(with the extension ``_run.py``) that are useful for debugging purposes.
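
The command used to run a test program can be wrapped, either per task generator through
the ``ut_cmd`` attribute or globally through ``--testcmd``; the ``%s`` placeholder receives
the original test command line. A hypothetical example running a test under valgrind::

	def build(bld):
		bld.program(features='test', source='main.c', target='app',
			ut_cmd='valgrind --error-exitcode=1 %s')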
"""

import os, shlex, sys
from waflib.TaskGen import feature, after_method, taskgen_method
from waflib import Utils, Task, Logs, Options
from waflib.Tools import ccroot
testlock = Utils.threading.Lock()

SCRIPT_TEMPLATE = """#! %(python)s
import subprocess, sys
cmd = %(cmd)r
# if you want to debug with gdb:
#cmd = ['gdb', '-args'] + cmd
env = %(env)r
status = subprocess.call(cmd, env=env, cwd=%(cwd)r, shell=isinstance(cmd, str))
sys.exit(status)
"""

@taskgen_method
def handle_ut_cwd(self, key):
	"""
	Task generator method, used internally to limit code duplication.
	This method may disappear anytime.
	"""
	cwd = getattr(self, key, None)
	if cwd:
		if isinstance(cwd, str):
			# we want a Node instance
			if os.path.isabs(cwd):
				self.ut_cwd = self.bld.root.make_node(cwd)
			else:
				self.ut_cwd = self.path.make_node(cwd)

@feature('test_scripts')
def make_interpreted_test(self):
	"""Create interpreted unit tests."""
	for x in ['test_scripts_source', 'test_scripts_template']:
		if not hasattr(self, x):
			Logs.warn('a test_scripts taskgen is missing %s' % x)
			return

	self.ut_run, lst = Task.compile_fun(self.test_scripts_template, shell=getattr(self, 'test_scripts_shell', False))

	script_nodes = self.to_nodes(self.test_scripts_source)
	for script_node in script_nodes:
		tsk = self.create_task('utest', [script_node])
		tsk.vars = lst + tsk.vars
		tsk.env['SCRIPT'] = script_node.path_from(tsk.get_cwd())

	self.handle_ut_cwd('test_scripts_cwd')

	env = getattr(self, 'test_scripts_env', None)
	if env:
		self.ut_env = env
	else:
		self.ut_env = dict(os.environ)

	paths = getattr(self, 'test_scripts_paths', {})
	for (k, v) in paths.items():
		p = self.ut_env.get(k, '').split(os.pathsep)
		if isinstance(v, str):
			v = v.split(os.pathsep)
		self.ut_env[k] = os.pathsep.join(p + v)

@feature('test')
@after_method('apply_link', 'process_use')
def make_test(self):
	"""Create the unit test task. There can be only one unit test task per task generator."""
	if not getattr(self, 'link_task', None):
		return

	tsk = self.create_task('utest', self.link_task.outputs)
	if getattr(self, 'ut_str', None):
		self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False))
		tsk.vars = lst + tsk.vars

	self.handle_ut_cwd('ut_cwd')

	if not hasattr(self, 'ut_paths'):
		paths = []
		for x in self.tmp_use_sorted:
			try:
				y = self.bld.get_tgen_by_name(x).link_task
			except AttributeError:
				pass
			else:
				if not isinstance(y, ccroot.stlink_task):
					paths.append(y.outputs[0].parent.abspath())
		self.ut_paths = os.pathsep.join(paths) + os.pathsep

	if not hasattr(self, 'ut_env'):
		self.ut_env = dct = dict(os.environ)
		def add_path(var):
			dct[var] = self.ut_paths + dct.get(var, '')
		if Utils.is_win32:
			add_path('PATH')
		elif Utils.unversioned_sys_platform() == 'darwin':
			add_path('DYLD_LIBRARY_PATH')
			add_path('LD_LIBRARY_PATH')
		else:
			add_path('LD_LIBRARY_PATH')

	if not hasattr(self, 'ut_cmd'):
		self.ut_cmd = getattr(Options.options, 'testcmd', False)

@taskgen_method
def add_test_results(self, tup):
	"""Override and return tup[1] to interrupt the build immediately if a test does not run"""
	Logs.debug("ut: %r", tup)
	try:
		self.utest_results.append(tup)
	except AttributeError:
		self.utest_results = [tup]
	try:
		self.bld.utest_results.append(tup)
	except AttributeError:
		self.bld.utest_results = [tup]

class utest(Task.Task):
	after = ['vnum', 'inst']

	def runnable_status(self):
		"""
		Always execute the task if ``waf --alltests`` was used, and skip the
		tests entirely if ``waf --notests`` was used.
		"""
		if getattr(Options.options, 'no_tests', False):
			return Task.SKIP_ME

		ret = super(utest, self).runnable_status()
		if ret == Task.SKIP_ME:
			if getattr(Options.options, 'all_tests', False):
				return Task.RUN_ME
		return ret

	def get_test_env(self):
		"""
		In general, tests may require any library built anywhere in the project.
		Override this method if fewer paths are needed.
		"""
		return self.generator.ut_env

	def post_run(self):
		super(utest, self).post_run()
		if getattr(Options.options, 'clear_failed_tests', False) and self.waf_unit_test_results[1]:
			self.generator.bld.task_sigs[self.uid()] = None

	def run(self):
		"""
		Execute the test. The execution is always successful, and the results
		are stored on ``self.generator.bld.utest_results`` for postprocessing.

		Override ``add_test_results`` to interrupt the build.
		"""
		if hasattr(self.generator, 'ut_run'):
			return self.generator.ut_run(self)

		self.ut_exec = getattr(self.generator, 'ut_exec', [self.inputs[0].abspath()])
		ut_cmd = getattr(self.generator, 'ut_cmd', False)
		if ut_cmd:
			self.ut_exec = shlex.split(ut_cmd % ' '.join(self.ut_exec))

		return self.exec_command(self.ut_exec)

	def exec_command(self, cmd, **kw):
		self.generator.bld.log_command(cmd, kw)
		if getattr(Options.options, 'dump_test_scripts', False):
			script_code = SCRIPT_TEMPLATE % {
				'python': sys.executable,
				'env': self.get_test_env(),
				'cwd': self.get_cwd().abspath(),
				'cmd': cmd
			}
			script_file = self.inputs[0].abspath() + '_run.py'
			Utils.writef(script_file, script_code)
			os.chmod(script_file, Utils.O755)
			Logs.info('Test debug file written as %r' % script_file)

		proc = Utils.subprocess.Popen(cmd, cwd=self.get_cwd().abspath(), env=self.get_test_env(),
			stderr=Utils.subprocess.PIPE, stdout=Utils.subprocess.PIPE, shell=isinstance(cmd, str))
		(stdout, stderr) = proc.communicate()
		self.waf_unit_test_results = tup = (self.inputs[0].abspath(), proc.returncode, stdout, stderr)
		testlock.acquire()
		try:
			return self.generator.add_test_results(tup)
		finally:
			testlock.release()

	def get_cwd(self):
		return getattr(self.generator, 'ut_cwd', self.inputs[0].parent)

def summary(bld):
	"""
	Display an execution summary::

		def build(bld):
			bld(features='cxx cxxprogram test', source='main.c', target='app')
			from waflib.Tools import waf_unit_test
			bld.add_post_fun(waf_unit_test.summary)
	"""
	lst = getattr(bld, 'utest_results', [])
	if lst:
		Logs.pprint('CYAN', 'execution summary')

		total = len(lst)
		tfail = len([x for x in lst if x[1]])

		Logs.pprint('GREEN', '  tests that pass %d/%d' % (total - tfail, total))
		for (f, code, out, err) in lst:
			if not code:
				Logs.pprint('GREEN', '    %s' % f)

		Logs.pprint('GREEN' if tfail == 0 else 'RED', '  tests that fail %d/%d' % (tfail, total))
		for (f, code, out, err) in lst:
			if code:
				Logs.pprint('RED', '    %s' % f)

def set_exit_code(bld):
	"""
	If any of the tests fail, waf will exit with that exit code.
	This is useful if you have an automated build system that needs
	to report on errors from the tests.
	You may use it like this::

		def build(bld):
			bld(features='cxx cxxprogram test', source='main.c', target='app')
			from waflib.Tools import waf_unit_test
			bld.add_post_fun(waf_unit_test.set_exit_code)
	"""
	lst = getattr(bld, 'utest_results', [])
	for (f, code, out, err) in lst:
		if code:
			msg = []
			if out:
				msg.append('stdout:%s%s' % (os.linesep, out.decode('utf-8')))
			if err:
				msg.append('stderr:%s%s' % (os.linesep, err.decode('utf-8')))
			bld.fatal(os.linesep.join(msg))

def options(opt):
	"""
	Provide the ``--alltests``, ``--notests`` and ``--testcmd`` command-line options.
	"""
	opt.add_option('--notests', action='store_true', default=False, help='Exec no unit tests', dest='no_tests')
	opt.add_option('--alltests', action='store_true', default=False, help='Exec all unit tests', dest='all_tests')
	opt.add_option('--clear-failed', action='store_true', default=False,
		help='Force failed unit tests to run again next time', dest='clear_failed_tests')
	opt.add_option('--testcmd', action='store', default=False, dest='testcmd',
		help='Run the unit tests using the given command, e.g. --testcmd="valgrind --error-exitcode=1 %s" to run under valgrind')
	opt.add_option('--dump-test-scripts', action='store_true', default=False,
		help='Create python scripts to help debug tests', dest='dump_test_scripts')