#!/usr/bin/env python
"""
    An LTP [execution and] parsing wrapper.

    Used as a second layer for ease-of-use with users as many developers
    complain about complexity involved with trying to use LTP in my
    organization -_-.

    Copyright (C) 2009, Garrett Cooper

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License along
    with this program; if not, write to the Free Software Foundation, Inc.,
    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""

from optparse import OptionGroup, OptionParser
import os, re, sys

class ResultsParseException(Exception):
    """ Extended class for parsing LTP results. """

def parse_ltp_results(exec_log, output_log, verbose=0):
    """ Function for parsing LTP results.

    1. The exec log is the log with the results in summary form.

       And now a note from our sponsors about exec logs...

       startup='Thu Oct 1 06:42:07 2009'
       tag=abort01 stime=1254379327 dur=2 exit=exited stat=0 core=no cu=0 cs=16
       tag=accept01 stime=1254379329 dur=0 exit=exited stat=0 core=no cu=1 cs=0
       tag=access01 stime=1254379329 dur=0 exit=exited stat=0 core=no cu=0 cs=0
       tag=access02 stime=1254379329 dur=0 exit=exited stat=0 core=no cu=0 cs=0
       tag=access03 stime=1254379329 dur=1 exit=exited stat=0 core=no cu=0 cs=1

       [...]

       a. tag is the test tag name.
       b. stime is the system time at the start of the exec.
       c. dur is the total duration of the test.
       d. exit tells you what the result was. Valid values are:
          - exited
          - signaled
          - stopped
          - unknown
          See run_child in pan.c.
       e. stat is the exit status.
       f. core answers the question: `did I dump core?'.
       g. cu is the cutime (cumulative user time).
       h. cs is the cstime (cumulative system time).

    2. The output log is the log with all of the terse results.
    3. verbose tells us whether or not we need to include the passed results.
    """

    if not os.access(exec_log, os.R_OK):
        raise ResultsParseException("Exec log - %s - specified doesn't exist"
                                    % exec_log)
    elif 1 <= verbose and not os.access(output_log, os.R_OK):
        # Need the output log for context to the end user.
        raise ResultsParseException("Output log - %s - specified doesn't exist"
                                    % output_log)

    context = None

    failed = [ ]
    passed = 0

    if 2 <= verbose:
        passed = [ ]

    target_vals = ( 'exited', '0', 'no' )
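
    # Each summary line of the exec log is matched by the regex below; a test
    # is counted as passed only when its (exit, stat, core) fields equal
    # target_vals, i.e. it exited normally with status 0 and no core dump.
    # The matches are collected into a list up front so that the emptiness
    # check afterwards actually works (re.finditer returns an iterator).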
    fd = open(exec_log, 'r')

    try:
        content = fd.read()
        matches = list(re.finditer(r'tag=(?P<tag>\w+).+exit=(?P<exit>\w+) '
                                   r'stat=(?P<stat>\d+) core=(?P<core>\w+)',
                                   content))
    finally:
        fd.close()

    if not matches:
        raise ResultsParseException("No parseable results were found in the "
                                    "exec log - `%s'." % exec_log)

    for match in matches:

        if ((match.group('exit'), match.group('stat'), match.group('core')) !=
                target_vals):
            failed.append(match.group('tag'))
        elif 2 <= verbose:
            passed.append(match.group('tag'))
        else:
            passed += 1

    # Save memory on large files because lists can eat up a fair amount of
    # memory.
    matches = None

    if 1 <= verbose:

        context = { }

        search_tags = failed[:]

        if 2 <= verbose:
            search_tags += passed

        search_tags.sort()
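
        # The output log is scanned for blocks that look roughly like the
        # following (layout assumed from the markers used below, not
        # verified here):
        #
        #   <<<test_start>>>
        #   tag=abort01 ...
        #   <<<test_output>>>
        #   ...verbatim test output...
        #   <<<execution_status>>>
        #   ...
        #
        # Everything between <<<test_output>>> and <<<execution_status>>> is
        # captured as the context for the matching tag in search_tags.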

        fd = open(output_log, 'r')

        try:

            try:
                lines = fd.readlines()
            finally:
                fd.close()

            end_output = '<<<execution_status>>>'
            output_start = '<<<test_output>>>'

            tag_re = re.compile(r'tag=(\w+)')

            grab_output = False
            local_context = ''
            search_tag = None

            line_iterator = iter(lines)

            try:

                while True:

                    line = line_iterator.next()

                    if line.startswith(end_output):

                        if search_tag:
                            context[search_tag] = local_context

                        grab_output = False
                        local_context = ''
                        search_tag = None

                    if not search_tag:

                        while True:

                            line = line_iterator.next()

                            match = tag_re.match(line)

                            if match and match.group(1) in search_tags:
                                search_tag = match.group(1)
                                break

                    elif line.startswith(output_start):
                        grab_output = True
                    elif grab_output:
                        local_context += line

            except StopIteration:
                pass

            for k in context.keys():
                if k not in search_tags:
                    raise ResultsParseException('Leftover token in search '
                                                'keys: %s' % k)

        except Exception, exc:
            # XXX (garrcoop): change from Exception to soft error and print
            # out warning with logging module.
            raise ResultsParseException('Encountered exception reading output '
                                        'for context: %s' % str(exc))

    return failed, passed, context
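
# Note for callers: parse_ltp_results returns a (failed, passed, context)
# tuple. failed is a list of failed test tags; passed is a count when
# verbose < 2 and a list of passed tags when verbose >= 2; context is None
# when verbose == 0, otherwise a dict mapping tags to their captured output.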

def determine_context(output_log, testsuite, test_set, context):
    """ Return a dict of context values mapping 'testsuite : test' -> context. """

    test_set_context = {}

    for test in test_set:

        if test in context:
            test_context = context[test]
            del context[test]
        else:
            test_context = ('Could not determine context for %s; please see '
                            'output log - %s' % (test, output_log))

        test_set_context['%s : %s' % (testsuite, test)] = test_context

    return test_set_context

def print_context(output_dest, header, testsuite_context):
    """ Print out testsuite_context to output_dest, heading it up with
        header.
    """

    output_dest.write('\n'.join(['', '=' * 40, header, '-' * 40, '']))

    for test, context in testsuite_context.items():
        output_dest.write('<output test="%s">\n%s\n</output>\n' %
                          (test, context.strip()))
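
# For illustration only (suite and test names here are hypothetical):
# print_context(sys.stdout, 'FAILED TESTCASES for syscalls',
#               {'syscalls : abort01': '...captured output...'})
# emits something along these lines:
#
#   ========================================
#   FAILED TESTCASES for syscalls
#   ----------------------------------------
#   <output test="syscalls : abort01">
#   ...captured output...
#   </output>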

def main():
    """ main. """

    parser = OptionParser(prog=os.path.basename(sys.argv[0]),
                          usage='usage: %prog [options] test ...',
                          version='0.0.1')

    ltpdir = os.getenv('LTPROOT', '@prefix@')

    parser.add_option('-l', '--ltp-dir', dest='ltp_dir',
                      default=ltpdir, help='LTP directory [default: %default]')
    parser.add_option('-L', '--log-dir', dest='log_dir',
                      default=None,
                      help=('directory for [storing and] retrieving logs '
                            '[default: %s/output]' % ltpdir),
                      metavar='DIR')
    parser.add_option('-p', '--postprocess-only', dest='postprocess_only',
                      default=False, action='store_true',
                      help=("Don't execute runltp; just postprocess logs "
                            "[default: %default]."))
    parser.add_option('-o', '--output-file', dest='output_file',
                      default=None,
                      help='File to output results')
    parser.add_option('-r', '--runltp-opts', dest='runltp_opts',
                      default='',
                      help=('options to pass directly to runltp (will '
                            'suppress -q).'))

    group = OptionGroup(parser, 'Logging',
                        'If --summary-mode is 0, then the summary output is '
                        'suppressed. '
                        'If --summary-mode is 1 [the default], then summary '
                        'output will be displayed for test execution. '
                        'If --summary-mode is 2, then summary output will be '
                        'provided on a per-test suite basis. If only '
                        'one test suite is specified, this has the same net '
                        "effect as `--summary-mode 1'. "
                        'If --verbose is specified once, prints out failed '
                        'test information with additional context. '
                        'If --verbose is specified twice, prints out the '
                        'failed and passed test context, as well as the '
                        'summary.')

    parser.add_option('-s', '--summary-mode', dest='summary_mode', default=1,
                      type='int',
                      help='See Logging.')
    parser.add_option('-v', '--verbose', dest='verbose', default=0,
                      action='count',
                      help=('Increases context verbosity from tests. See '
                            'Logging for more details.'))
    parser.add_option_group(group)

    group = OptionGroup(parser, 'Copyright',
                        '%(prog)s version %(version)s, Copyright (C) 2009, '
                        'Garrett Cooper. %(prog)s comes with ABSOLUTELY NO '
                        'WARRANTY; '
                        'This is free software, and you are welcome to '
                        'redistribute it under certain conditions (See the '
                        'license text in %(file)s for more details).'
                        % { 'file' : os.path.abspath(__file__),
                            'prog' : parser.prog,
                            'version' : parser.version })

    parser.add_option_group(group)
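
    # Illustrative invocations (the script name, paths, and suite names are
    # hypothetical examples, not requirements):
    #   execltp -l /opt/ltp -v syscalls
    #   execltp --postprocess-only -L /opt/ltp/output -vv syscalls commands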

    opts, args = parser.parse_args()

    # Remove -q from the opts string, as long as it's a standalone option.
    runltp_opts = re.sub(r'(?<!\S)-q(?!\S)', '', opts.runltp_opts)

    if not opts.log_dir:
        opts.log_dir = os.path.join(opts.ltp_dir, 'output')

    if not opts.summary_mode and not opts.verbose:
        parser.error('You cannot suppress summary output and disable '
                     'verbosity.')
    elif opts.summary_mode not in range(3):
        parser.error('--summary-mode must be a value between 0 and 2.')

    if len(args) == 0:
        # This matches the default test suite list in runltp when -f isn't
        # specified. Look for `SCENFILES'.
        args = [ 'syscalls', 'fs', 'fsx', 'dio', 'io', 'mm', 'ipc', 'sched',
                 'math', 'nptl', 'pty', 'containers', 'fs_bind', 'controllers',
                 'filecaps', 'cap_bounds', 'fcntl-locktests', 'connectors',
                 'admin_tools', 'timers', 'power_management_tests', 'numa',
                 'hugetlb', 'commands', 'hyperthreading' ]

    if opts.output_file:

        output_dir = os.path.dirname(opts.output_file)

        if output_dir:
            # Not cwd; let's check to make sure that the directory does or
            # does not exist.

            if not os.path.exists(output_dir):
                # We need to make the directory.
                os.makedirs(output_dir)
            elif not os.path.isdir(os.path.abspath(output_dir)):
                # Path exists, but isn't a directory. Oops!
                parser.error('Dirname for path specified - %s - is not valid'
                             % output_dir)

        else:
            # Current path (cwd)
            opts.output_file = os.path.join(os.getcwd(), opts.output_file)

        output_dest = open(opts.output_file, 'w')

    else:

        output_dest = sys.stdout

    try:

        failed_context = {}
        passed_context = {}

        failed_count = 0
        passed_count = 0

        if opts.summary_mode == 2 and len(args) == 1:
            opts.summary_mode = 1
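
        # Each suite's logs are expected at <log_dir>/<suite>-exec.log and
        # <log_dir>/<suite>-output.log, matching the -l/-o arguments handed
        # to runltp below.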

        for testsuite in args:

            # Iterate over the provided test list

            context = {}
            exec_log = os.path.join(opts.log_dir, '%s-exec.log' % testsuite)
            output_log = os.path.join(opts.log_dir, ('%s-output.log'
                                                     % testsuite))

            failed_subset = {}

            runtest_file = os.path.join(opts.ltp_dir, 'runtest', testsuite)

            if not opts.postprocess_only:

                for log in [ exec_log, output_log ]:
                    if os.path.isfile(log):
                        os.remove(log)

                if not os.access(runtest_file, os.R_OK):
                    output_dest.write("%s doesn't exist; skipping "
                                      "test\n" % runtest_file)
                    continue
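
                # The command below expands to something along the lines of
                # (paths shown are illustrative):
                #   /opt/ltp/runltp -f syscalls \
                #       -l /opt/ltp/output/syscalls-exec.log \
                #       -o /opt/ltp/output/syscalls-output.log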
                os.system(' '.join([ os.path.join(opts.ltp_dir, 'runltp'),
                                     runltp_opts, '-f', testsuite,
                                     '-l', exec_log, '-o', output_log ]))

            try:

                failed_subset, passed_css, context = \
                    parse_ltp_results(exec_log, output_log,
                                      verbose=opts.verbose)

            except ResultsParseException, rpe:
                output_dest.write('Error encountered when parsing results for '
                                  'test - %s: %s\n' % (testsuite, str(rpe)))
                continue

            failed_count += len(failed_subset)

            failed_subset_context = {}
            passed_subset_context = {}

            if opts.verbose:
                failed_subset_context = determine_context(output_log,
                                                          testsuite,
                                                          failed_subset,
                                                          context)

            if type(passed_css) == list:

                passed_subset_count = len(passed_css)

                if opts.verbose == 2:
                    passed_subset_context = determine_context(output_log,
                                                              testsuite,
                                                              passed_css,
                                                              context)

            else:

                passed_subset_count = passed_css

            passed_count += passed_subset_count

            if opts.summary_mode == 1:

                failed_context.update(failed_subset_context)
                passed_context.update(passed_subset_context)

            else:

                if 1 <= opts.verbose:
                    # Print out failed testcases.
                    print_context(output_dest,
                                  'FAILED TESTCASES for %s' % testsuite,
                                  failed_subset_context)

                if opts.verbose == 2:
                    # Print out passed testcases with context.
                    print_context(output_dest,
                                  'PASSED TESTCASES for %s' % testsuite,
                                  passed_subset_context)

                if opts.summary_mode == 2:
                    output_dest.write("""
========================================
SUMMARY for: %s
----------------------------------------
PASS - %d
FAIL - %d
----------------------------------------
""" % (testsuite, passed_subset_count, len(failed_subset)))

        if opts.summary_mode == 1:

            # Print out overall results.

            if 1 <= opts.verbose:
                # Print out failed testcases with context.
                print_context(output_dest, "FAILED TESTCASES", failed_context)

            if opts.verbose == 2:
                # Print out passed testcases with context.
                print_context(output_dest, "PASSED TESTCASES", passed_context)

            output_dest.write("""
========================================
SUMMARY for tests: %s
----------------------------------------
PASS - %d
FAIL - %d
----------------------------------------
""" % (' '.join(args), passed_count, failed_count))

    finally:

        if output_dest != sys.stdout:
            output_dest.close()

if __name__ == '__main__':
    main()