3 import os
, sys
, unittest
, optparse
5 from autotest_lib
.utils
import parallel
6 from autotest_lib
.client
.common_lib
.test_utils
import unittest
as custom_unittest
# Command-line interface for the unittest runner.
parser = optparse.OptionParser()
parser.add_option("-r", action="store", type="string", dest="start",
                  help="root directory to start running unittests")
# NOTE: help text fixed — it previously described --full as running the
# *shortened* version, which is the opposite of what the flag does.
parser.add_option("--full", action="store_true", dest="full", default=False,
                  help="run the full test suite, including the long-running "
                       "tests that are skipped by default")
parser.add_option("--debug", action="store_true", dest="debug", default=False,
                  help="run in debug mode")
parser.add_option("--skip-tests", dest="skip_tests", default=[],
                  help="A space separated list of tests to skip")

parser.set_defaults(module_list=None)
# Test modules grouped by the optional dependency they require.  All of
# these are treated as "long" tests and only run when --full is given.
# NOTE(review): the source was truncated; set names and a few entries below
# are reconstructed — verify against upstream before relying on them.
REQUIRES_DJANGO = set((
        'monitor_db_unittest.py',
        'monitor_db_functional_test.py',
        'monitor_db_cleanup_test.py',
        'frontend_unittest.py',
        'csv_encoder_unittest.py',
        'rpc_interface_unittest.py',
        'scheduler_models_unittest.py',
        'metahost_scheduler_unittest.py',
        'site_metahost_scheduler_unittest.py',
        'rpc_utils_unittest.py',
        'site_rpc_utils_unittest.py',
        'execution_engine_unittest.py',
        'service_proxy_lib_test.py',
        ))

REQUIRES_MYSQLDB = set((
        'migrate_unittest.py',
        'db_utils_unittest.py',
        ))

REQUIRES_GWT = set((
        'client_compilation_unittest.py',
        ))

REQUIRES_SIMPLEJSON = set((
        'serviceHandler_unittest.py',
        ))

REQUIRES_AUTH = set((
        'trigger_unittest.py',
        ))

REQUIRES_HTTPLIB2 = set(())

REQUIRES_PROTOBUFS = set((
        'job_serializer_unittest.py',
        ))

LONG_RUNTIME = set((
        'base_barrier_unittest.py',
        'logging_manager_test.py',
        ))

# This particular KVM autotest test is not a unittest
SKIP = set((
        'guest_test.py',
        ))

LONG_TESTS = (REQUIRES_DJANGO |
              REQUIRES_MYSQLDB |
              REQUIRES_GWT |
              REQUIRES_SIMPLEJSON |
              REQUIRES_AUTH |
              REQUIRES_HTTPLIB2 |
              REQUIRES_PROTOBUFS |
              LONG_RUNTIME)

# Absolute path of the autotest tree root (the parent of this script's dir).
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
87 class TestFailure(Exception): pass
def run_test(mod_names, options):
    """Import a single test module and run its unittest suites.

    @param mod_names: A list of individual parts of the module name to import
            and run as a test suite.
    @param options: optparse options.

    @raises TestFailure: if the module's run had any failures or errors.
    """
    # Suppress child-process output unless we are debugging.
    # NOTE(review): the debug guard around redirect_io was truncated in the
    # source — confirm against upstream.
    if not options.debug:
        parallel.redirect_io()

    # print() call form is valid in both Python 2 and 3 (was a py2-only
    # print statement).
    print('Running %s' % '.'.join(mod_names))
    mod = common.setup_modules.import_module(mod_names[-1],
                                             '.'.join(mod_names[:-1]))
    # Run through both the stock unittest loader and the autotest-specific
    # one; each may discover a different set of cases.
    for ut_module in [unittest, custom_unittest]:
        test = ut_module.defaultTestLoader.loadTestsFromModule(mod)
        suite = ut_module.TestSuite(test)
        runner = ut_module.TextTestRunner(verbosity=2)
        result = runner.run(suite)
        if result.errors or result.failures:
            msg = '%s had %d failures and %d errors.'
            msg %= '.'.join(mod_names), len(result.failures), len(result.errors)
            raise TestFailure(msg)
def scan_for_modules(start, options):
    """Walk the tree below start and collect test modules to run.

    Only descends into directories that are python packages (i.e. contain an
    __init__.py).  Honors the skip list and, unless --full was given, skips
    everything in LONG_TESTS.

    @param start: The absolute directory to look for tests under.
    @param options: optparse options.

    @returns a list of module-name part lists, each rooted at 'autotest_lib'.
    """
    modules = []

    # Copy, so that --skip-tests does not mutate the module-level SKIP set.
    skip_tests = set(SKIP)
    if options.skip_tests:
        skip_tests.update(options.skip_tests.split())

    for dirpath, subdirs, filenames in os.walk(start):
        # Only look in and below subdirectories that are python modules.
        if '__init__.py' not in filenames:
            # Remove stale byte-compiled files from non-package directories.
            for filename in filenames:
                if filename.endswith('.pyc'):
                    os.unlink(os.path.join(dirpath, filename))
            # Skip all subdirectories below this one, it is not a module.
            del subdirs[:]
            if options.debug:
                print('Skipping %s' % dirpath)
            continue  # Skip this directory.

        # Look for unittest files.
        for fname in filenames:
            if fname.endswith('_unittest.py') or fname.endswith('_test.py'):
                if not options.full and fname in LONG_TESTS:
                    continue
                if fname in skip_tests:
                    continue
                # Strip exactly the '.py' extension.  The old
                # rstrip('.py') stripped a *character set* and would also
                # eat trailing 'p'/'y' characters from the module stem.
                path_no_py = os.path.splitext(os.path.join(dirpath, fname))[0]
                assert path_no_py.startswith(ROOT)
                names = path_no_py[len(ROOT)+1:].split(os.sep)
                modules.append(['autotest_lib'] + names)
                if options.debug:
                    print('testing %s' % path_no_py)
    return modules
def find_and_run_tests(start, options):
    """
    Find and run Python unittest suites below the given directory.  Only look
    in subdirectories of start that are actual importable Python modules.

    @param start: The absolute directory to look for tests under.
    @param options: optparse options.

    @returns a list of errors from the parallel run, or [] if all passed.
    """
    if options.module_list:
        modules = []
        for m in options.module_list:
            modules.append(m.split('.'))
    else:
        modules = scan_for_modules(start, options)

    if options.debug:
        print('Number of test modules found: %d' % len(modules))

    functions = {}
    for module_names in modules:
        # Create a function that'll test a particular module.  module=module
        # is a hack to force python to evaluate the params now.  We then
        # rename the function to make error reporting nicer.
        run_module = lambda module=module_names: run_test(module, options)
        name = '.'.join(module_names)
        run_module.__name__ = name
        functions[run_module] = set()

    try:
        dargs = {}
        if options.debug:
            # Serialize the subprocesses so debug output stays readable.
            dargs['max_simultaneous_procs'] = 1
        pe = parallel.ParallelExecute(functions, **dargs)
        pe.run_until_completion()
    except parallel.ParallelError as err:
        # "as err" is valid in Python 2.6+ and 3 (was py2-only "except X, err").
        return err.errors
    return []
def main():
    """Parse arguments, run the discovered tests, and exit with a status."""
    options, args = parser.parse_args()

    # Any positional arguments are explicit module names to run.
    options.module_list = args

    # Strip the arguments off the command line, so that the unit tests do not
    # see them and try to parse them themselves.
    del sys.argv[1:]

    # Guard against -r being omitted (options.start is None), which would
    # make os.path.join raise a TypeError; default to the tree root.
    absolute_start = os.path.join(ROOT, options.start or '')
    errors = find_and_run_tests(absolute_start, options)
    if errors:
        print("%d tests resulted in an error/failure:" % len(errors))
        for error in errors:
            print("\t%s" % error)
        print("Rerun %s --debug to see the failure details." % sys.argv[0])
        sys.exit(1)
    else:
        print("All passed!")
        sys.exit(0)


if __name__ == "__main__":
    main()