# Carlos Rafael Giani, 2006

"""
Unit tests are run in the shutdown() method, and are meant for c/c++ programs.

The test programs must be executable without any command-line parameters.

In the shutdown method, add the following code:

	>>> def shutdown():
	...	ut = UnitTest.unit_test()
	...	ut.run()
	...	ut.print_results()

Each object to use as a unit test must be a program and must have X{obj.unit_test=1}
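
For example, a minimal wscript sketch (the source and target names below are
hypothetical, for illustration only):

	>>> def build(bld):
	...	t = bld(features='cc cprogram', source='test_foo.c', target='test_foo')
	...	t.unit_test = 1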
"""

import os, sys
import Build, TaskGen, Utils, Options, Logs, Task
from TaskGen import before, after, feature
from Constants import *
class unit_test(object):
	"Unit test representation"
	def __init__(self):
		self.returncode_ok = 0		# Unit test returncode considered OK. All returncodes differing from this one
						# will cause the unit test to be marked as "FAILED".

		# The following variables are filled with data by run().
		# print_results() uses these for printing the unit test summary,
		# but if there is need for direct access to the results,
		# they can be retrieved here, after calling run().
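		# A direct-access sketch (hypothetical usage, for illustration):
		#	ut = UnitTest.unit_test()
		#	ut.run()
		#	failed = [l for l in ut.unit_test_results if not ut.unit_test_results[l]]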
		self.num_tests_ok = 0		# Number of successful unit tests
		self.num_tests_failed = 0	# Number of failed unit tests
		self.num_tests_err = 0		# Tests that have not even run
		self.total_num_tests = 0	# Total number of unit tests
		self.max_label_length = 0	# Maximum label length (pretty-print the output)

		self.unit_tests = Utils.ordered_dict()	# Unit test dictionary. Key: the label (unit test filename relative
							# to the build dir), value: unit test filename with absolute path
		self.unit_test_results = {}	# Dictionary containing the unit test results.
						# Key: the label, value: result (true = success, false = failure)
		self.unit_test_erroneous = {}	# Dictionary indicating erroneous unit tests.
						# Key: the label, value: true = unit test has an error, false = unit test is ok
		self.change_to_testfile_dir = False	# True if the test file needs to be executed from the same dir
		self.want_to_see_test_output = False	# True to see the stdout from the testfile (for example check suites)
		self.want_to_see_test_error = False	# True to see the stderr from the testfile (for example check suites)
		self.run_if_waf_does = 'check'		# 'build' was the old default
	def run(self):
		"Run the unit tests and gather results (note: no output here)"

		self.num_tests_ok = 0
		self.num_tests_failed = 0
		self.num_tests_err = 0
		self.total_num_tests = 0
		self.max_label_length = 0

		self.unit_tests = Utils.ordered_dict()
		self.unit_test_results = {}
		self.unit_test_erroneous = {}

		ld_library_path = []
		# If waf is not building, don't run anything
		if not Options.commands[self.run_if_waf_does]: return
		# Get the paths for the shared libraries, and obtain the unit tests to execute
		for obj in Build.bld.all_task_gen:
			try:
				link_task = obj.link_task
			except AttributeError:
				pass
			else:
				lib_path = link_task.outputs[0].parent.abspath(obj.env)
				if lib_path not in ld_library_path:
					ld_library_path.append(lib_path)

			unit_test = getattr(obj, 'unit_test', '')
			if unit_test and 'cprogram' in obj.features:
				output = obj.path
				filename = os.path.join(output.abspath(obj.env), obj.target)
				srcdir = output.abspath()
				label = os.path.join(output.bldpath(obj.env), obj.target)
				self.max_label_length = max(self.max_label_length, len(label))
				self.unit_tests[label] = (filename, srcdir)
		self.total_num_tests = len(self.unit_tests)
		# Now run the unit tests
		Utils.pprint('GREEN', 'Running the unit tests')

		count = 0
		result = 1
		for label in self.unit_tests.allkeys:
			file_and_src = self.unit_tests[label]
			filename = file_and_src[0]
			srcdir = file_and_src[1]
			count += 1
			line = Build.bld.progress_line(count, self.total_num_tests, Logs.colors.GREEN, Logs.colors.NORMAL)
			if Options.options.progress_bar and line:
				sys.stderr.write(line)
				sys.stderr.flush()
			try:
				kwargs = {}
				kwargs['env'] = os.environ.copy()
				if self.change_to_testfile_dir:
					kwargs['cwd'] = srcdir
				if not self.want_to_see_test_output:
					kwargs['stdout'] = Utils.pproc.PIPE # PIPE for ignoring output
				if not self.want_to_see_test_error:
					kwargs['stderr'] = Utils.pproc.PIPE # PIPE for ignoring output
				if ld_library_path:
					v = kwargs['env']
					def add_path(dct, path, var):
						dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')])
					if sys.platform == 'win32':
						add_path(v, ld_library_path, 'PATH')
					elif sys.platform == 'darwin':
						add_path(v, ld_library_path, 'DYLD_LIBRARY_PATH')
						add_path(v, ld_library_path, 'LD_LIBRARY_PATH')
					else:
						add_path(v, ld_library_path, 'LD_LIBRARY_PATH')
				pp = Utils.pproc.Popen(filename, **kwargs)
				(out, err) = pp.communicate() # note: the captured output is ignored - this old system is going to disappear

				result = int(pp.returncode == self.returncode_ok)

				if result:
					self.num_tests_ok += 1
				else:
					self.num_tests_failed += 1

				self.unit_test_results[label] = result
				self.unit_test_erroneous[label] = 0
			except OSError:
				self.unit_test_erroneous[label] = 1
				self.num_tests_err += 1
			except KeyboardInterrupt:
				pass

		if Options.options.progress_bar: sys.stdout.write(Logs.colors.cursor_on)
	def print_results(self):
		"Pretty-prints a summary of all unit tests, along with some statistics"

		# If waf is not building, don't output anything
		if not Options.commands[self.run_if_waf_does]: return

		p = Utils.pprint
		# Early quit if no tests were performed
		if self.total_num_tests == 0:
			p('YELLOW', 'No unit tests present')
			return
		for label in self.unit_tests.allkeys:
			filename = self.unit_tests[label]

			err = 0
			result = 0

			try: err = self.unit_test_erroneous[label]
			except KeyError: pass

			try: result = self.unit_test_results[label]
			except KeyError: pass

			# Pad with dots so the OK/FAILED/ERROR suffixes line up
			n = self.max_label_length - len(label)
			if err: n += 4
			elif result: n += 7
			else: n += 3

			line = '%s %s' % (label, '.' * n)

			if err: p('RED', '%sERROR' % line)
			elif result: p('GREEN', '%sOK' % line)
			else: p('YELLOW', '%sFAILED' % line)
		percentage_ok = float(self.num_tests_ok) / float(self.total_num_tests) * 100.0
		percentage_failed = float(self.num_tests_failed) / float(self.total_num_tests) * 100.0
		percentage_erroneous = float(self.num_tests_err) / float(self.total_num_tests) * 100.0

		p('NORMAL', '''
Successful tests:      %i (%.1f%%)
Failed tests:          %i (%.1f%%)
Erroneous tests:       %i (%.1f%%)

Total number of tests: %i
''' % (self.num_tests_ok, percentage_ok, self.num_tests_failed, percentage_failed,
	self.num_tests_err, percentage_erroneous, self.total_num_tests))
		p('GREEN', 'Unit tests finished')

############################################################################################

"""
New unit test system

The targets with feature 'test' are executed after they are built:
bld(features='cprogram cc test', ...)

To display the results:
import UnitTest
bld.add_post_fun(UnitTest.summary)
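
A fuller wscript sketch (the source and target names are hypothetical, for
illustration only):

	def build(bld):
		bld(features='cc cprogram test', source='test_foo.c', target='test_foo')

		import UnitTest
		bld.add_post_fun(UnitTest.summary)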
"""

import threading
testlock = threading.Lock()
def set_options(opt):
	opt.add_option('--alltests', action='store_true', default=True, help='Exec all unit tests', dest='all_tests')
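
# With --alltests (enabled by default above), utest tasks are re-run even when
# waf would otherwise skip them as up to date; see test_status() further below.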

@feature('test')
@after('apply_link', 'vars_target_cprogram')
def make_test(self):
	if not 'cprogram' in self.features:
		Logs.error('test cannot be executed %s' % self)
		return

	self.default_install_path = None
	self.create_task('utest', self.link_task.outputs)

def exec_test(self):
	variant = self.env.variant()

	filename = self.inputs[0].abspath(self.env)
	self.ut_exec = getattr(self, 'ut_exec', [filename])
	if getattr(self.generator, 'ut_fun', None):
		self.generator.ut_fun(self)
	# Cache the test environment (with the library search paths) on the build
	# context, so it is computed only once for all tests
	try:
		fu = getattr(self.generator.bld, 'all_test_paths')
	except AttributeError:
		fu = os.environ.copy()
		self.generator.bld.all_test_paths = fu

		lst = []
		for obj in self.generator.bld.all_task_gen:
			link_task = getattr(obj, 'link_task', None)
			if link_task and link_task.env.variant() == variant:
				lst.append(link_task.outputs[0].parent.abspath(obj.env))

		def add_path(dct, path, var):
			dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')])

		if sys.platform == 'win32':
			add_path(fu, lst, 'PATH')
		elif sys.platform == 'darwin':
			add_path(fu, lst, 'DYLD_LIBRARY_PATH')
			add_path(fu, lst, 'LD_LIBRARY_PATH')
		else:
			add_path(fu, lst, 'LD_LIBRARY_PATH')

	cwd = getattr(self.generator, 'ut_cwd', '') or self.inputs[0].parent.abspath(self.env)
	proc = Utils.pproc.Popen(self.ut_exec, cwd=cwd, env=fu, stderr=Utils.pproc.PIPE, stdout=Utils.pproc.PIPE)
	(stdout, stderr) = proc.communicate()

	tup = (filename, proc.returncode, stdout, stderr)
	self.generator.utest_result = tup

	# Append the result under a lock, since tests may run in parallel threads
	testlock.acquire()
	try:
		bld = self.generator.bld
		Logs.debug("ut: %r", tup)
		try:
			bld.utest_results.append(tup)
		except AttributeError:
			bld.utest_results = [tup]
	finally:
		testlock.release()

cls = Task.task_type_from_func('utest', func=exec_test, color='PINK', ext_in='.bin')

# Patch runnable_status so that, with --alltests, tests are executed even when
# the task would normally be skipped as up to date
old = cls.runnable_status
def test_status(self):
	ret = old(self)
	if ret == SKIP_ME and getattr(Options.options, 'all_tests', False):
		return RUN_ME
	return ret

cls.runnable_status = test_status

def summary(bld):
	"Display an execution summary of the unit tests; use via bld.add_post_fun(summary)"
	lst = getattr(bld, 'utest_results', [])

	if lst:
		Utils.pprint('CYAN', 'execution summary')

		total = len(lst)
		tfail = len([x for x in lst if x[1]])

		Utils.pprint('CYAN', '  tests that pass %d/%d' % (total - tfail, total))
		for (f, code, out, err) in lst:
			if not code:
				Utils.pprint('CYAN', '    %s' % f)

		Utils.pprint('CYAN', '  tests that fail %d/%d' % (tfail, total))
		for (f, code, out, err) in lst:
			if code:
				Utils.pprint('CYAN', '    %s' % f)