Bumping manifests a=b2g-bump
[gecko.git] / js / src / gdb / run-tests.py
blob1337a5f0aaab5ad381e6771dd1c5033c5dfb0f9e
1 #!/usr/bin/env python
2 # This Source Code Form is subject to the terms of the Mozilla Public
3 # License, v. 2.0. If a copy of the MPL was not distributed with this
4 # file, You can obtain one at http://mozilla.org/MPL/2.0/.
6 # run-tests.py -- Python harness for GDB SpiderMonkey support
8 import os, re, subprocess, sys, traceback
9 from threading import Thread
11 # From this directory:
12 import progressbar
13 from taskpool import TaskPool, get_cpu_count
15 # Backported from Python 3.1 posixpath.py
16 def _relpath(path, start=None):
17 """Return a relative version of a path"""
19 if not path:
20 raise ValueError("no path specified")
22 if start is None:
23 start = os.curdir
25 start_list = os.path.abspath(start).split(os.sep)
26 path_list = os.path.abspath(path).split(os.sep)
28 # Work out how much of the filepath is shared by start and path.
29 i = len(os.path.commonprefix([start_list, path_list]))
31 rel_list = [os.pardir] * (len(start_list)-i) + path_list[i:]
32 if not rel_list:
33 return os.curdir
34 return os.path.join(*rel_list)
36 os.path.relpath = _relpath
# Characters that may appear in a shell word without quoting.  Note that
# the quote characters themselves are deliberately NOT in this safe set:
# a word containing a quote must itself be quoted.  (The original
# pattern treated ' and " as safe, so a word like don't went unquoted.)
shell_need_escapes = re.compile(r'[^\w%+,\-./:=@]', re.DOTALL)
# Characters that must be backslash-escaped inside a double-quoted shell
# string: dollar, backquote, double quote, and backslash.
shell_dquote_escapes = re.compile(r'[$`"\\]')
def make_shell_cmd(l):
    """Render the argv list |l| as a single copy-pasteable shell command.

    Words with no special characters pass through unchanged; words
    without a single quote are single-quoted; words containing a single
    quote are double-quoted with the necessary backslash escapes.
    """
    def quote(s):
        if shell_need_escapes.search(s):
            if s.find("'") < 0:
                return "'" + s + "'"
            # BUG FIX: the original replacement '\\g<0>' rewrote each
            # special character as itself, inserting no escapes at all;
            # prepend a backslash instead.
            return '"' + shell_dquote_escapes.sub(r'\\\g<0>', s) + '"'
        return s

    return ' '.join([quote(_) for _ in l])
52 # An instance of this class collects the lists of passing, failing, and
53 # timing-out tests, runs the progress bar, and prints a summary at the end.
54 class Summary(object):
56 class SummaryBar(progressbar.ProgressBar):
57 def __init__(self, limit):
58 super(Summary.SummaryBar, self).__init__('', limit, 24)
59 def start(self):
60 self.label = '[starting ]'
61 self.update(0)
62 def counts(self, run, failures, timeouts):
63 self.label = '[%4d|%4d|%4d|%4d]' % (run - failures, failures, timeouts, run)
64 self.update(run)
66 def __init__(self, num_tests):
67 self.run = 0
68 self.failures = [] # kind of judgemental; "unexpecteds"?
69 self.timeouts = []
70 if not OPTIONS.hide_progress:
71 self.bar = Summary.SummaryBar(num_tests)
73 # Progress bar control.
74 def start(self):
75 if not OPTIONS.hide_progress:
76 self.bar.start()
77 def update(self):
78 if not OPTIONS.hide_progress:
79 self.bar.counts(self.run, len(self.failures), len(self.timeouts))
80 # Call 'thunk' to show some output, while getting the progress bar out of the way.
81 def interleave_output(self, thunk):
82 if not OPTIONS.hide_progress:
83 self.bar.clear()
84 thunk()
85 self.update()
87 def passed(self, test):
88 self.run += 1
89 self.update()
91 def failed(self, test):
92 self.run += 1
93 self.failures.append(test)
94 self.update()
96 def timeout(self, test):
97 self.run += 1
98 self.timeouts.append(test)
99 self.update()
101 def finish(self):
102 if not OPTIONS.hide_progress:
103 self.bar.finish()
105 if self.failures:
107 print "tests failed:"
108 for test in self.failures:
109 test.show(sys.stdout)
111 if OPTIONS.worklist:
112 try:
113 with open(OPTIONS.worklist) as out:
114 for test in self.failures:
115 out.write(test.name + '\n')
116 except IOError as err:
117 sys.stderr.write("Error writing worklist file '%s': %s"
118 % (OPTIONS.worklist, err))
119 sys.exit(1)
121 if OPTIONS.write_failures:
122 try:
123 with open(OPTIONS.write_failures) as out:
124 for test in self.failures:
125 test.show(out)
126 except IOError as err:
127 sys.stderr.write("Error writing worklist file '%s': %s"
128 % (OPTIONS.write_failures, err))
129 sys.exit(1)
131 if self.timeouts:
132 print "tests timed out:"
133 for test in self.timeouts:
134 test.show(sys.stdout)
136 if self.failures or self.timeouts:
137 sys.exit(2)
class Test(TaskPool.Task):
    """One GDB-driven test: runs the .py script at |path| inside gdb (via
    lib-for-tests/catcher.py) and reports the outcome to |summary|."""
    def __init__(self, path, summary):
        super(Test, self).__init__()
        self.test_path = path # path to .py test file
        self.summary = summary

        # test.name is the name of the test relative to the top of the test
        # directory. This is what we use to report failures and timeouts,
        # and when writing test lists.
        self.name = os.path.relpath(self.test_path, OPTIONS.testdir)

        # Accumulated output and eventual exit status of the gdb child.
        self.stdout = ''
        self.stderr = ''
        self.returncode = None

    def cmd(self):
        """Return the gdb argv list that runs this test."""
        testlibdir = os.path.normpath(os.path.join(OPTIONS.testdir, '..', 'lib-for-tests'))
        return [OPTIONS.gdb_executable,
                '-nw',          # Don't create a window (unnecessary?)
                '-nx',          # Don't read .gdbinit.
                '--ex', 'add-auto-load-safe-path %s' % (OPTIONS.builddir,),
                '--ex', 'set env LD_LIBRARY_PATH %s' % (OPTIONS.libdir,),
                '--ex', 'file %s' % (os.path.join(OPTIONS.builddir, 'gdb-tests'),),
                '--eval-command', 'python testlibdir=%r' % (testlibdir,),
                '--eval-command', 'python testscript=%r' % (self.test_path,),
                '--eval-command', 'python execfile(%r)' % os.path.join(testlibdir, 'catcher.py')]

    def start(self, pipe, deadline):
        super(Test, self).start(pipe, deadline)
        if OPTIONS.show_cmd:
            self.summary.interleave_output(lambda: self.show_cmd(sys.stdout))

    def onStdout(self, text):
        self.stdout += text

    def onStderr(self, text):
        self.stderr += text

    def onFinished(self, returncode):
        # Zero exit status means the test passed.
        self.returncode = returncode
        if OPTIONS.show_output:
            self.summary.interleave_output(lambda: self.show_output(sys.stdout))
        if returncode != 0:
            self.summary.failed(self)
        else:
            self.summary.passed(self)

    def onTimeout(self):
        self.summary.timeout(self)

    def show_cmd(self, out):
        # BUG FIX: honor the |out| stream argument; the original used a
        # bare 'print', always writing to stdout regardless of |out|, in
        # a format inconsistent with show().
        out.write('Command: %s\n' % (make_shell_cmd(self.cmd()),))

    def show_output(self, out):
        if self.stdout:
            out.write('Standard output:')
            out.write('\n' + self.stdout + '\n')
        if self.stderr:
            out.write('Standard error:')
            out.write('\n' + self.stderr + '\n')

    def show(self, out):
        """Write this test's name (plus, with --write-failure-output, its
        command line, output, and gdb exit status) to |out|."""
        out.write(self.name + '\n')
        if OPTIONS.write_failure_output:
            out.write('Command: %s\n' % (make_shell_cmd(self.cmd()),))
            self.show_output(out)
            out.write('GDB exit code: %r\n' % (self.returncode,))
def find_tests(dir, substring = None):
    """Return the .py test files under |dir|.

    When |substring| is given, keep only those whose path relative to
    |dir| contains it."""
    found = []
    for root, _, files in os.walk(dir):
        if root == '.':
            continue
        for leaf in files:
            if not leaf.endswith('.py'):
                continue
            candidate = os.path.join(root, leaf)
            if substring is None or substring in os.path.relpath(candidate, dir):
                found.append(candidate)
    return found
def build_test_exec(builddir):
    """(Re)build the 'gdb-tests' executable in |builddir| via make.

    Raises subprocess.CalledProcessError if the build fails."""
    # check_call either returns zero or raises; binding its result to an
    # unused variable (as the original did) was misleading dead code.
    subprocess.check_call(['make', 'gdb-tests'], cwd=builddir)
def run_tests(tests, summary):
    """Run every Test in |tests| under a TaskPool, honoring the
    configured worker count and per-test timeout.  (|summary| is updated
    through the tests' own callbacks, not used directly here.)"""
    task_pool = TaskPool(tests,
                         job_limit=OPTIONS.workercount,
                         timeout=OPTIONS.timeout)
    task_pool.run_all()
# Parsed command-line options; set by main() and read throughout the file.
OPTIONS = None
def main(argv):
    """Entry point: parse |argv| (sys.argv[1:]), collect the test set,
    build the gdb-tests executable, run the tests, and exit.

    Exit status: 0 on success, 1 on a harness error, 2 (via
    Summary.finish) if any test failed or timed out."""
    global OPTIONS
    script_path = os.path.abspath(__file__)
    script_dir = os.path.dirname(script_path)

    # LIBDIR is the directory in which we find the SpiderMonkey shared
    # library, to link against.
    #
    # The [TESTS] optional arguments are paths of test files relative
    # to the jit-test/tests directory.
    from optparse import OptionParser
    op = OptionParser(usage='%prog [options] LIBDIR [TESTS...]')
    op.add_option('-s', '--show-cmd', dest='show_cmd', action='store_true',
                  help='show GDB shell command run')
    op.add_option('-o', '--show-output', dest='show_output', action='store_true',
                  help='show output from GDB')
    op.add_option('-x', '--exclude', dest='exclude', action='append',
                  help='exclude given test dir or path')
    op.add_option('-t', '--timeout', dest='timeout', type=float, default=150.0,
                  help='set test timeout in seconds')
    op.add_option('-j', '--worker-count', dest='workercount', type=int,
                  help='Run [WORKERCOUNT] tests at a time')
    op.add_option('--no-progress', dest='hide_progress', action='store_true',
                  help='hide progress bar')
    op.add_option('--worklist', dest='worklist', metavar='FILE',
                  help='Read tests to run from [FILE] (or run all if [FILE] not found);\n'
                  'write failures back to [FILE]')
    op.add_option('-r', '--read-tests', dest='read_tests', metavar='FILE',
                  help='Run test files listed in [FILE]')
    op.add_option('-w', '--write-failures', dest='write_failures', metavar='FILE',
                  help='Write failing tests to [FILE]')
    op.add_option('--write-failure-output', dest='write_failure_output', action='store_true',
                  help='With --write-failures=FILE, additionally write the output of failed tests to [FILE]')
    op.add_option('--gdb', dest='gdb_executable', metavar='EXECUTABLE', default='gdb',
                  help='Run tests with [EXECUTABLE], rather than plain \'gdb\'.')
    op.add_option('--srcdir', dest='srcdir',
                  default=os.path.abspath(os.path.join(script_dir, '..')),
                  help='Use SpiderMonkey sources in [SRCDIR].')
    op.add_option('--testdir', dest='testdir', default=os.path.join(script_dir, 'tests'),
                  help='Find tests in [TESTDIR].')
    op.add_option('--builddir', dest='builddir',
                  help='Build test executable in [BUILDDIR].')
    (OPTIONS, args) = op.parse_args(argv)
    if len(args) < 1:
        op.error('missing LIBDIR argument')
    OPTIONS.libdir = os.path.abspath(args[0])
    test_args = args[1:]

    if not OPTIONS.workercount:
        OPTIONS.workercount = get_cpu_count()

    # Compute default for OPTIONS.builddir now, since we've computed OPTIONS.libdir.
    if not OPTIONS.builddir:
        OPTIONS.builddir = os.path.join(OPTIONS.libdir, 'gdb')

    test_set = set()

    # All the various sources of test names accumulate.
    if test_args:
        for arg in test_args:
            test_set.update(find_tests(OPTIONS.testdir, arg))
    if OPTIONS.worklist:
        try:
            with open(OPTIONS.worklist) as f:
                for line in f:
                    # BUG FIX: the original called test_set.update() with a
                    # string (which adds each *character*), built from the
                    # undefined name 'test_dir'. Add the whole path, rooted
                    # at OPTIONS.testdir, instead.
                    test_set.add(os.path.join(OPTIONS.testdir, line.strip('\n')))
        except IOError:
            # With worklist, a missing file means to start the process with
            # the complete list of tests.
            sys.stderr.write("Couldn't read worklist file '%s'; running all tests\n"
                             % (OPTIONS.worklist,))
            test_set = set(find_tests(OPTIONS.testdir))
    if OPTIONS.read_tests:
        try:
            with open(OPTIONS.read_tests) as f:
                for line in f:
                    # BUG FIX: same set.add / OPTIONS.testdir repair as the
                    # worklist loop above.
                    test_set.add(os.path.join(OPTIONS.testdir, line.strip('\n')))
        except IOError as err:
            sys.stderr.write("Error trying to read test file '%s': %s\n"
                             % (OPTIONS.read_tests, err))
            sys.exit(1)

    # If none of the above options were passed, and no tests were listed
    # explicitly, use the complete set.
    if not test_args and not OPTIONS.worklist and not OPTIONS.read_tests:
        test_set = set(find_tests(OPTIONS.testdir))

    if OPTIONS.exclude:
        exclude_set = set()
        for exclude in OPTIONS.exclude:
            # BUG FIX: 'test_dir' was never defined; search OPTIONS.testdir.
            exclude_set.update(find_tests(OPTIONS.testdir, exclude))
        test_set -= exclude_set

    if not test_set:
        sys.stderr.write("No tests found matching command line arguments.\n")
        sys.exit(1)

    summary = Summary(len(test_set))
    test_list = [ Test(_, summary) for _ in sorted(test_set) ]

    # Build the test executable from all the .cpp files found in the test
    # directory tree.
    try:
        build_test_exec(OPTIONS.builddir)
    except subprocess.CalledProcessError as err:
        sys.stderr.write("Error building test executable: %s\n" % (err,))
        sys.exit(1)

    # Run the tests.
    try:
        summary.start()
        run_tests(test_list, summary)
        summary.finish()
    except OSError as err:
        sys.stderr.write("Error running tests: %s\n" % (err,))
        sys.exit(1)

    sys.exit(0)

if __name__ == '__main__':
    main(sys.argv[1:])