[Android] Switch b/a/test_runner.py from optparse to argparse.
build/android/test_runner.py
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all types of tests from one unified interface."""

import argparse
import collections
import logging
import os
import shutil
import signal
import sys
import threading
import unittest

from pylib import android_commands
from pylib import constants
from pylib import forwarder
from pylib import ports
from pylib.base import base_test_result
from pylib.base import environment_factory
from pylib.base import test_dispatcher
from pylib.base import test_instance_factory
from pylib.base import test_run_factory
from pylib.gtest import gtest_config
from pylib.gtest import setup as gtest_setup
from pylib.gtest import test_options as gtest_test_options
from pylib.linker import setup as linker_setup
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.junit import setup as junit_setup
from pylib.junit import test_dispatcher as junit_dispatcher
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.results import json_results
from pylib.results import report_results
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options
from pylib.utils import apk_helper
from pylib.utils import reraiser_thread
from pylib.utils import run_tests_helper


def AddCommonOptions(parser):
  """Adds all common options to |parser|."""

  group = parser.add_argument_group('Common Options')

  default_build_type = os.environ.get('BUILDTYPE', 'Debug')

  debug_or_release_group = group.add_mutually_exclusive_group()
  debug_or_release_group.add_argument(
      '--debug', action='store_const', const='Debug', dest='build_type',
      default=default_build_type,
      help=('If set, run test suites under out/Debug. '
            'Default is env var BUILDTYPE or Debug.'))
  debug_or_release_group.add_argument(
      '--release', action='store_const', const='Release', dest='build_type',
      help=('If set, run test suites under out/Release. '
            'Default is env var BUILDTYPE or Debug.'))

  group.add_argument('--build-directory', dest='build_directory',
                     help=('Path to the directory in which build files are'
                           ' located (should not include build type)'))
  group.add_argument('--output-directory', dest='output_directory',
                     help=('Path to the directory in which build files are'
                           ' located (must include build type). This will take'
                           ' precedence over --debug, --release and'
                           ' --build-directory'))
  group.add_argument('--num_retries', dest='num_retries', type=int, default=2,
                     help=('Number of retries for a test before '
                           'giving up (default: %(default)s).'))
  group.add_argument('-v',
                     '--verbose',
                     dest='verbose_count',
                     default=0,
                     action='count',
                     help='Verbose level (multiple times for more)')
  group.add_argument('--flakiness-dashboard-server',
                     dest='flakiness_dashboard_server',
                     help=('Address of the server that is hosting the '
                           'Chrome for Android flakiness dashboard.'))
  group.add_argument('--enable-platform-mode', action='store_true',
                     help=('Run the test scripts in platform mode, which '
                           'conceptually separates the test runner from the '
                           '"device" (local or remote, real or emulated) on '
                           'which the tests are running. [experimental]'))
  group.add_argument('-e', '--environment', default='local',
                     choices=constants.VALID_ENVIRONMENTS,
                     help='Test environment to run in (default: %(default)s).')
  group.add_argument('--adb-path',
                     help=('Specify the absolute path of the adb binary that '
                           'should be used.'))
  group.add_argument('--json-results-file', dest='json_results_file',
                     help='If set, will dump results in JSON form '
                          'to specified file.')


def ProcessCommonOptions(args):
  """Processes and handles all common options."""
  run_tests_helper.SetLogLevel(args.verbose_count)
  constants.SetBuildType(args.build_type)
  if args.build_directory:
    constants.SetBuildDirectory(args.build_directory)
  if args.output_directory:
    constants.SetOutputDirectort(args.output_directory)
  if args.adb_path:
    constants.SetAdbPath(args.adb_path)
  # Some things such as Forwarder require ADB to be in the environment path.
  adb_dir = os.path.dirname(constants.GetAdbPath())
  if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
    os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']


def AddDeviceOptions(parser):
  """Adds device options to |parser|."""
  group = parser.add_argument_group(title='Device Options')
  group.add_argument('-c', dest='cleanup_test_files',
                     help='Cleanup test files on the device after run',
                     action='store_true')
  group.add_argument('--tool',
                     dest='tool',
                     help=('Run the test under a tool '
                           '(use --tool help to list them)'))
  group.add_argument('-d', '--device', dest='test_device',
                     help=('Target device for the test suite '
                           'to run on.'))


def AddGTestOptions(parser):
  """Adds gtest options to |parser|."""

  gtest_suites = list(gtest_config.STABLE_TEST_SUITES
                      + gtest_config.EXPERIMENTAL_TEST_SUITES)

  group = parser.add_argument_group('GTest Options')
  group.add_argument('-s', '--suite', dest='suite_name', choices=gtest_suites,
                     nargs='+', metavar='SUITE_NAME', required=True,
                     help=('Executable name of the test suite to run.'))
  group.add_argument('-f', '--gtest_filter', '--gtest-filter',
                     dest='test_filter',
                     help='googletest-style filter string.')
  group.add_argument('--gtest_also_run_disabled_tests',
                     '--gtest-also-run-disabled-tests',
                     dest='run_disabled', action='store_true',
                     help='Also run disabled tests if applicable.')
  group.add_argument('-a', '--test-arguments', dest='test_arguments',
                     default='',
                     help='Additional arguments to pass to the test.')
  group.add_argument('-t', dest='timeout', type=int, default=60,
                     help='Timeout to wait for each test '
                          '(default: %(default)s).')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')
  AddDeviceOptions(parser)
  AddCommonOptions(parser)


def AddLinkerTestOptions(parser):
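  """Adds linker test options to |parser|."""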
  group = parser.add_argument_group('Linker Test Options')
  group.add_argument('-f', '--gtest-filter', dest='test_filter',
                     help='googletest-style filter string.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def AddJavaTestOptions(argument_group):
  """Adds the Java test options to |argument_group|."""

  argument_group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help=('Test filter (if not fully qualified, will run all matches).'))
  argument_group.add_argument(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  argument_group.add_argument(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  argument_group.add_argument(
      '--screenshot', dest='screenshot_failures', action='store_true',
      help='Capture screenshots of test failures')
  argument_group.add_argument(
      '--save-perf-json', action='store_true',
      help='Saves the JSON file for each UI Perf test.')
  argument_group.add_argument(
      '--official-build', action='store_true', help='Run official build tests.')
  argument_group.add_argument(
      '--test_data', '--test-data', action='append', default=[],
      help=('Each instance defines a directory of test data that should be '
            'copied to the target(s) before running the tests. The argument '
            'should be of the form <target>:<source>, <target> is relative to '
            'the device data directory, and <source> is relative to the '
            'chromium build directory.'))


def ProcessJavaTestOptions(args):
  """Processes options/arguments and populates |args| with defaults."""

  # TODO(jbudorick): Handle most of this function in argparse.
  if args.annotation_str:
    args.annotations = args.annotation_str.split(',')
  elif args.test_filter:
    args.annotations = []
  else:
    args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                        'EnormousTest', 'IntegrationTest']

  if args.exclude_annotation_str:
    args.exclude_annotations = args.exclude_annotation_str.split(',')
  else:
    args.exclude_annotations = []


def AddInstrumentationTestOptions(parser):
  """Adds Instrumentation test options to |parser|."""

  parser.usage = '%(prog)s [options]'

  group = parser.add_argument_group('Instrumentation Test Options')
  AddJavaTestOptions(group)

  java_or_python_group = group.add_mutually_exclusive_group()
  java_or_python_group.add_argument(
      '-j', '--java-only', action='store_false',
      dest='run_python_tests', default=True, help='Run only the Java tests.')
  java_or_python_group.add_argument(
      '-p', '--python-only', action='store_false',
      dest='run_java_tests', default=True,
      help='Run only the host-driven tests.')

  group.add_argument('--host-driven-root',
                     help='Root of the host-driven tests.')
  group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
                     action='store_true',
                     help='Wait for debugger.')
  group.add_argument('--test-apk', dest='test_apk', required=True,
                     help=('The name of the apk containing the tests '
                           '(without the .apk extension; '
                           'e.g. "ContentShellTest").'))
  group.add_argument('--coverage-dir',
                     help=('Directory in which to place all generated '
                           'EMMA coverage files.'))
  group.add_argument('--device-flags', dest='device_flags', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessInstrumentationOptions(args):
  """Processes options/arguments and populates |args| with defaults.

  Args:
    args: argparse.Namespace object.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant to
    instrumentation tests.
  """

  ProcessJavaTestOptions(args)

  if not args.host_driven_root:
    args.run_python_tests = False

  args.test_apk_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_APKS_DIR,
      '%s.apk' % args.test_apk)
  args.test_apk_jar_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_TEST_JAVALIB_DIR,
      '%s.jar' % args.test_apk)
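  # The support apk (if any) is expected to sit next to the test apk, e.g.
  # FooTest.apk -> FooTestSupport.apk.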
  args.test_support_apk_path = '%sSupport%s' % (
      os.path.splitext(args.test_apk_path))

  args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path)

  # TODO(jbudorick): Get rid of InstrumentationOptions.
  return instrumentation_test_options.InstrumentationOptions(
      args.tool,
      args.cleanup_test_files,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.wait_for_debugger,
      args.coverage_dir,
      args.test_apk,
      args.test_apk_path,
      args.test_apk_jar_path,
      args.test_runner,
      args.test_support_apk_path,
      args.device_flags,
      args.isolate_file_path)


def AddUIAutomatorTestOptions(parser):
  """Adds UI Automator test options to |parser|."""

  group = parser.add_argument_group('UIAutomator Test Options')
  AddJavaTestOptions(group)
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--test-jar', dest='test_jar', required=True,
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))

  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessUIAutomatorOptions(args):
  """Processes UIAutomator options/arguments.

  Args:
    args: argparse.Namespace object.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """

  ProcessJavaTestOptions(args)

  if os.path.exists(args.test_jar):
    # The dexed JAR is fully qualified, assume the info JAR lives alongside.
    args.uiautomator_jar = args.test_jar
  else:
    args.uiautomator_jar = os.path.join(
        constants.GetOutDirectory(),
        constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % args.test_jar)
  args.uiautomator_info_jar = (
      args.uiautomator_jar[:args.uiautomator_jar.find('.dex.jar')] +
      '_java.jar')

  return uiautomator_test_options.UIAutomatorOptions(
      args.tool,
      args.cleanup_test_files,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.uiautomator_jar,
      args.uiautomator_info_jar,
      args.package)


def AddJUnitTestOptions(parser):
  """Adds junit test options to |parser|."""

  group = parser.add_argument_group('JUnit Test Options')
  group.add_argument(
      '-s', '--test-suite', dest='test_suite', required=True,
      help=('JUnit test suite to run.'))
  group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help='Filters tests googletest-style.')
  group.add_argument(
      '--package-filter', dest='package_filter',
      help='Filters tests by package.')
  group.add_argument(
      '--runner-filter', dest='runner_filter',
      help='Filters tests by runner class. Must be fully qualified.')
  group.add_argument(
      '--sdk-version', dest='sdk_version', type=int,
      help='The Android SDK version.')
  AddCommonOptions(parser)


def AddMonkeyTestOptions(parser):
  """Adds monkey test options to |parser|."""

  group = parser.add_argument_group('Monkey Test Options')
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--event-count', default=10000, type=int,
      help='Number of events to generate (default: %(default)s).')
  group.add_argument(
      '--category', default='',
      help='A list of allowed categories.')
  group.add_argument(
      '--throttle', default=100, type=int,
      help='Delay between events (ms) (default: %(default)s).')
  group.add_argument(
      '--seed', type=int,
      help=('Seed value for pseudo-random generator. Same seed value generates '
            'the same sequence of events. Seed is randomized by default.'))
  group.add_argument(
      '--extra-args', default='',
      help=('String of other args to pass to the command verbatim.'))

  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessMonkeyTestOptions(args):
  """Processes all monkey test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  # TODO(jbudorick): Handle this directly in argparse with nargs='+'
  category = args.category
  if category:
    category = args.category.split(',')

  # TODO(jbudorick): Get rid of MonkeyOptions.
  return monkey_test_options.MonkeyOptions(
      args.verbose_count,
      args.package,
      args.event_count,
      category,
      args.throttle,
      args.seed,
      args.extra_args)


def AddPerfTestOptions(parser):
  """Adds perf test options to |parser|."""

  group = parser.add_argument_group('Perf Test Options')

  class SingleStepAction(argparse.Action):
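    """Validates that the single-step command and --single-step are given together."""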
    def __call__(self, parser, namespace, values, option_string=None):
      if values and not namespace.single_step:
        parser.error('single step command provided, '
                     'but --single-step not specified.')
      elif namespace.single_step and not values:
        parser.error('--single-step specified, '
                     'but no single step command provided.')
      setattr(namespace, self.dest, values)

  step_group = group.add_mutually_exclusive_group(required=True)
  # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
  # This requires removing "--" from client calls.
  step_group.add_argument(
      '--single-step', action='store_true',
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  step_group.add_argument(
      '--steps',
      help='JSON file containing the list of commands to run.')
  step_group.add_argument(
      '--print-step',
      help='The name of a previously executed perf step to print.')

  group.add_argument(
      '--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  group.add_argument(
      '--collect-chartjson-data',
      action='store_true',
      help='Cache the chartjson output from each step for later use.')
  group.add_argument(
      '--output-chartjson-data',
      default='',
      help='Write out chartjson into the given file.')
  group.add_argument(
      '--flaky-steps',
      help=('A JSON file containing steps that are flaky '
            'and will have their exit codes ignored.'))
  group.add_argument(
      '--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  group.add_argument(
      '-f', '--test-filter',
      help=('Test filter (will match against the names listed in --steps).'))
  group.add_argument(
      '--dry-run', action='store_true',
      help='Just print the steps without executing.')
  group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
                     help='If --single-step is specified, the command to run.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessPerfTestOptions(args):
  """Processes all perf test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # TODO(jbudorick): Move single_step handling down into the perf tests.
  if args.single_step:
    args.single_step = ' '.join(args.single_step_command)
  # TODO(jbudorick): Get rid of PerfOptions.
  return perf_test_options.PerfOptions(
      args.steps, args.flaky_steps, args.output_json_list,
      args.print_step, args.no_timeout, args.test_filter,
      args.dry_run, args.single_step, args.collect_chartjson_data,
      args.output_chartjson_data)


def AddPythonTestOptions(parser):
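  """Adds python test options to |parser|."""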
  group = parser.add_argument_group('Python Test Options')
  group.add_argument(
      '-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
      choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
      help='Name of the test suite to run.')
  AddCommonOptions(parser)


def _RunGTests(args, devices):
  """Subcommand of RunTestsCommands which runs gtests."""
  exit_code = 0
  for suite_name in args.suite_name:
    # TODO(jbudorick): Either deprecate multi-suite or move its handling down
    # into the gtest code.
    gtest_options = gtest_test_options.GTestOptions(
        args.tool,
        args.cleanup_test_files,
        args.test_filter,
        args.run_disabled,
        args.test_arguments,
        args.timeout,
        args.isolate_file_path,
        suite_name)
    runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

    results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite_name,
        flakiness_server=args.flakiness_dashboard_server)

    if args.json_results_file:
      json_results.GenerateJsonResultsFile(results, args.json_results_file)

    if os.path.isdir(constants.ISOLATE_DEPS_DIR):
      shutil.rmtree(constants.ISOLATE_DEPS_DIR)

  return exit_code


def _RunLinkerTests(args, devices):
  """Subcommand of RunTestsCommands which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(args, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunInstrumentationTests(args, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests."""
  logging.info('_RunInstrumentationTests(%s, %s)' % (str(args), str(devices)))

  instrumentation_options = ProcessInstrumentationOptions(args)

  if len(devices) > 1 and args.wait_for_debugger:
    logging.warning('Debugger can not be sharded, using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if args.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(
        instrumentation_options, devices)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    results.AddTestRunResults(test_results)

  if args.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        args.host_driven_root, args.official_build,
        instrumentation_options)

    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=args.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  if args.device_flags:
    args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                     args.device_flags)

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(args.test_apk),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunUIAutomatorTests(args, devices):
  """Subcommand of RunTestsCommands which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(args)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(args.test_jar),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunJUnitTests(args):
  """Subcommand of RunTestsCommand which runs junit tests."""
  runner_factory, tests = junit_setup.Setup(args)
  _, exit_code = junit_dispatcher.RunTests(tests, runner_factory)
  return exit_code


def _RunMonkeyTests(args, devices):
  """Subcommand of RunTestsCommands which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(args)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunPerfTests(args):
  """Subcommand of RunTestsCommands which runs perf tests."""
  perf_options = ProcessPerfTestOptions(args)

  # Just save a simple json with a list of test names.
  if perf_options.output_json_list:
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

  if perf_options.output_chartjson_data:
    return perf_test_runner.OutputChartjson(
        perf_options.print_step, perf_options.output_chartjson_data)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(perf_options.print_step)

  runner_factory, tests, devices = perf_setup.Setup(perf_options)

  # shard=False means that each device will get the full list of tests
  # and then each one will decide its own affinity.
  # shard=True means each device will pop the next test available from a queue,
  # which increases throughput but has no affinity.
  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage. Individual test exit codes
  # will be returned on the print_step stage.
  return 0


def _RunPythonTests(args):
  """Subcommand of RunTestsCommand which runs python unit tests."""
  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
  suite_path = suite_vars['path']
  suite_test_modules = suite_vars['test_modules']
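
  # Make the suite's modules importable while the tests run; sys.path is
  # restored in the finally block below.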
  sys.path = [suite_path] + sys.path
  try:
    suite = unittest.TestSuite()
    suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
                   for m in suite_test_modules)
    runner = unittest.TextTestRunner(verbosity=1+args.verbose_count)
    return 0 if runner.run(suite).wasSuccessful() else 1
  finally:
    sys.path = sys.path[1:]


def _GetAttachedDevices(test_device=None):
  """Get all attached devices.

  Args:
    test_device: Name of a specific device to use.

  Returns:
    A list of attached devices.
  """
  attached_devices = []

  attached_devices = android_commands.GetAttachedDevices()
  if test_device:
    assert test_device in attached_devices, (
        'Did not find device %s among attached devices. Attached devices: %s'
        % (test_device, ', '.join(attached_devices)))
    attached_devices = [test_device]

  assert attached_devices, 'No devices attached.'

  return sorted(attached_devices)


def RunTestsCommand(args, parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    args: argparse.Namespace object.
    parser: argparse.ArgumentParser object.

  Returns:
    Integer indicating exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
      individual test runner.
  """
  command = args.command

  ProcessCommonOptions(args)

  if args.enable_platform_mode:
    return RunTestsInPlatformMode(args, parser)

  if command in constants.LOCAL_MACHINE_TESTS:
    devices = []
  else:
    devices = _GetAttachedDevices(args.test_device)
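
  # Reset forwarder state and test server port allocation before dispatching
  # to the individual test runners.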
  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if command == 'gtest':
    return _RunGTests(args, devices)
  elif command == 'linker':
    return _RunLinkerTests(args, devices)
  elif command == 'instrumentation':
    return _RunInstrumentationTests(args, devices)
  elif command == 'uiautomator':
    return _RunUIAutomatorTests(args, devices)
  elif command == 'junit':
    return _RunJUnitTests(args)
  elif command == 'monkey':
    return _RunMonkeyTests(args, devices)
  elif command == 'perf':
    return _RunPerfTests(args)
  elif command == 'python':
    return _RunPythonTests(args)
  else:
    raise Exception('Unknown test type.')


_SUPPORTED_IN_PLATFORM_MODE = [
  # TODO(jbudorick): Add support for more test types.
  'gtest',
]


def RunTestsInPlatformMode(args, parser):
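  """Runs tests in platform mode, using the environment/test-run factories."""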
  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    parser.error('%s is not yet supported in platform mode' % args.command)

  with environment_factory.CreateEnvironment(args, parser.error) as env:
    with test_instance_factory.CreateTestInstance(args, parser.error) as test:
      with test_run_factory.CreateTestRun(
          args, env, test, parser.error) as test_run:
        results = test_run.RunTests()

        report_results.LogFull(
            results=results,
            test_type=test.TestType(),
            test_package=test_run.TestPackage(),
            annotation=args.annotations,
            flakiness_server=args.flakiness_dashboard_server)

        if args.json_results_file:
          json_results.GenerateJsonResultsFile(
              results, args.json_results_file)

  return results


CommandConfigTuple = collections.namedtuple(
    'CommandConfigTuple',
    ['add_options_func', 'help_txt'])
VALID_COMMANDS = {
    'gtest': CommandConfigTuple(
        AddGTestOptions,
        'googletest-based C++ tests'),
    'instrumentation': CommandConfigTuple(
        AddInstrumentationTestOptions,
        'InstrumentationTestCase-based Java tests'),
    'uiautomator': CommandConfigTuple(
        AddUIAutomatorTestOptions,
        "Tests that run via Android's uiautomator command"),
    'junit': CommandConfigTuple(
        AddJUnitTestOptions,
        'JUnit4-based Java tests'),
    'monkey': CommandConfigTuple(
        AddMonkeyTestOptions,
        "Tests based on Android's monkey"),
    'perf': CommandConfigTuple(
        AddPerfTestOptions,
        'Performance tests'),
    'python': CommandConfigTuple(
        AddPythonTestOptions,
        'Python tests based on unittest.TestCase'),
    'linker': CommandConfigTuple(
        AddLinkerTestOptions,
        'Linker tests'),
}


def DumpThreadStacks(_signal, _frame):
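  """Logs the stack of every live thread; installed as the SIGUSR1 handler."""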
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)


def main():
  signal.signal(signal.SIGUSR1, DumpThreadStacks)

  parser = argparse.ArgumentParser()
  command_parsers = parser.add_subparsers(title='test types',
                                          dest='command')
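
  # Build one subcommand (with its own option set) per supported test type.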
  for test_type, config in sorted(VALID_COMMANDS.iteritems(),
                                  key=lambda x: x[0]):
    subparser = command_parsers.add_parser(
        test_type, usage='%(prog)s [options]', help=config.help_txt)
    config.add_options_func(subparser)

  args = parser.parse_args()
  RunTestsCommand(args, parser)

  return 0


if __name__ == '__main__':
  sys.exit(main())