[chromedriver] Implement /sessions REST endpoint
[chromium-blink-merge.git] / build / android / test_runner.py
blob657e7c2152a6d77ad213936d6b0931811eef0d95
1 #!/usr/bin/env python
3 # Copyright 2013 The Chromium Authors. All rights reserved.
4 # Use of this source code is governed by a BSD-style license that can be
5 # found in the LICENSE file.
7 """Runs all types of tests from one unified interface."""
9 import argparse
10 import collections
11 import logging
12 import os
13 import shutil
14 import signal
15 import sys
16 import threading
17 import unittest
19 from pylib import android_commands
20 from pylib import constants
21 from pylib import forwarder
22 from pylib import ports
23 from pylib.base import base_test_result
24 from pylib.base import environment_factory
25 from pylib.base import test_dispatcher
26 from pylib.base import test_instance_factory
27 from pylib.base import test_run_factory
28 from pylib.gtest import gtest_config
29 from pylib.gtest import setup as gtest_setup
30 from pylib.gtest import test_options as gtest_test_options
31 from pylib.linker import setup as linker_setup
32 from pylib.host_driven import setup as host_driven_setup
33 from pylib.instrumentation import setup as instrumentation_setup
34 from pylib.instrumentation import test_options as instrumentation_test_options
35 from pylib.junit import setup as junit_setup
36 from pylib.junit import test_dispatcher as junit_dispatcher
37 from pylib.monkey import setup as monkey_setup
38 from pylib.monkey import test_options as monkey_test_options
39 from pylib.perf import setup as perf_setup
40 from pylib.perf import test_options as perf_test_options
41 from pylib.perf import test_runner as perf_test_runner
42 from pylib.results import json_results
43 from pylib.results import report_results
44 from pylib.uiautomator import setup as uiautomator_setup
45 from pylib.uiautomator import test_options as uiautomator_test_options
46 from pylib.utils import apk_helper
47 from pylib.utils import reraiser_thread
48 from pylib.utils import run_tests_helper
51 def AddCommonOptions(parser):
52 """Adds all common options to |parser|."""
54 group = parser.add_argument_group('Common Options')
56 default_build_type = os.environ.get('BUILDTYPE', 'Debug')
58 debug_or_release_group = group.add_mutually_exclusive_group()
59 debug_or_release_group.add_argument(
60 '--debug', action='store_const', const='Debug', dest='build_type',
61 default=default_build_type,
62 help=('If set, run test suites under out/Debug. '
63 'Default is env var BUILDTYPE or Debug.'))
64 debug_or_release_group.add_argument(
65 '--release', action='store_const', const='Release', dest='build_type',
66 help=('If set, run test suites under out/Release. '
67 'Default is env var BUILDTYPE or Debug.'))
69 group.add_argument('--build-directory', dest='build_directory',
70 help=('Path to the directory in which build files are'
71 ' located (should not include build type)'))
72 group.add_argument('--output-directory', dest='output_directory',
73 help=('Path to the directory in which build files are'
74 ' located (must include build type). This will take'
75 ' precedence over --debug, --release and'
76 ' --build-directory'))
77 group.add_argument('--num_retries', dest='num_retries', type=int, default=2,
78 help=('Number of retries for a test before '
79 'giving up (default: %(default)s).'))
80 group.add_argument('-v',
81 '--verbose',
82 dest='verbose_count',
83 default=0,
84 action='count',
85 help='Verbose level (multiple times for more)')
86 group.add_argument('--flakiness-dashboard-server',
87 dest='flakiness_dashboard_server',
88 help=('Address of the server that is hosting the '
89 'Chrome for Android flakiness dashboard.'))
90 group.add_argument('--enable-platform-mode', action='store_true',
91 help=('Run the test scripts in platform mode, which '
92 'conceptually separates the test runner from the '
93 '"device" (local or remote, real or emulated) on '
94 'which the tests are running. [experimental]'))
95 group.add_argument('-e', '--environment', default='local',
96 choices=constants.VALID_ENVIRONMENTS,
97 help='Test environment to run in (default: %(default)s).')
98 group.add_argument('--adb-path',
99 help=('Specify the absolute path of the adb binary that '
100 'should be used.'))
101 group.add_argument('--json-results-file', dest='json_results_file',
102 help='If set, will dump results in JSON form '
103 'to specified file.')
106 def ProcessCommonOptions(args):
107 """Processes and handles all common options."""
108 run_tests_helper.SetLogLevel(args.verbose_count)
109 constants.SetBuildType(args.build_type)
110 if args.build_directory:
111 constants.SetBuildDirectory(args.build_directory)
112 if args.output_directory:
113 constants.SetOutputDirectort(args.output_directory)
114 if args.adb_path:
115 constants.SetAdbPath(args.adb_path)
116 # Some things such as Forwarder require ADB to be in the environment path.
117 adb_dir = os.path.dirname(constants.GetAdbPath())
118 if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
119 os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']
122 def AddRemoteDeviceOptions(parser):
123 group = parser.add_argument_group('Remote Device Options')
125 group.add_argument('--trigger', default='',
126 help=('Only triggers the test if set. Stores test_run_id '
127 'in given file path. '))
128 group.add_argument('--collect', default='',
129 help=('Only collects the test results if set. '
130 'Gets test_run_id from given file path.'))
131 group.add_argument('--remote-device', default='Nexus 5',
132 help=('Device type to run test on.'))
133 group.add_argument('--remote-device-os', default='4.4.2',
134 help=('OS to have on the device.'))
135 group.add_argument('--results-path', default='',
136 help=('File path to download results to.'))
137 group.add_argument('--api-protocol',
138 help=('HTTP protocol to use. (http or https)'))
139 group.add_argument('--api-address', help=('Address to send HTTP requests.'))
140 group.add_argument('--api-port', help=('Port to send HTTP requests to.'))
141 group.add_argument('--runner-type', default='',
142 help=('Type of test to run as.'))
143 group.add_argument('--runner-package', default='',
144 help=('Package name of test.'))
145 group.add_argument('--apk-under-test', default='apks/Chrome.apk',
146 help=('APK to run tests on.'))
148 api_secret_group = group.add_mutually_exclusive_group()
149 api_secret_group.add_argument('--api-secret', default='',
150 help=('API secret for remote devices.'))
151 api_secret_group.add_argument('--api-secret-file', default='',
152 help=('Path to file that contains API secret.'))
154 api_key_group = group.add_mutually_exclusive_group()
155 api_key_group.add_argument('--api-key', default='',
156 help=('API key for remote devices.'))
157 api_key_group.add_argument('--api-key-file', default='',
158 help=('Path to file that contains API key.'))
161 def AddDeviceOptions(parser):
162 """Adds device options to |parser|."""
163 group = parser.add_argument_group(title='Device Options')
164 group.add_argument('-c', dest='cleanup_test_files',
165 help='Cleanup test files on the device after run',
166 action='store_true')
167 group.add_argument('--tool',
168 dest='tool',
169 help=('Run the test under a tool '
170 '(use --tool help to list them)'))
171 group.add_argument('-d', '--device', dest='test_device',
172 help=('Target device for the test suite '
173 'to run on.'))
176 def AddGTestOptions(parser):
177 """Adds gtest options to |parser|."""
179 gtest_suites = list(gtest_config.STABLE_TEST_SUITES
180 + gtest_config.EXPERIMENTAL_TEST_SUITES)
182 group = parser.add_argument_group('GTest Options')
183 group.add_argument('-s', '--suite', dest='suite_name',
184 nargs='+', metavar='SUITE_NAME', required=True,
185 help=('Executable name of the test suite to run. '
186 'Available suites include (but are not limited to): '
187 '%s' % ', '.join('"%s"' % s for s in gtest_suites)))
188 group.add_argument('-f', '--gtest_filter', '--gtest-filter',
189 dest='test_filter',
190 help='googletest-style filter string.')
191 group.add_argument('--gtest_also_run_disabled_tests',
192 '--gtest-also-run-disabled-tests',
193 dest='run_disabled', action='store_true',
194 help='Also run disabled tests if applicable.')
195 group.add_argument('-a', '--test-arguments', dest='test_arguments',
196 default='',
197 help='Additional arguments to pass to the test.')
198 group.add_argument('-t', dest='timeout', type=int, default=60,
199 help='Timeout to wait for each test '
200 '(default: %(default)s).')
201 group.add_argument('--isolate_file_path',
202 '--isolate-file-path',
203 dest='isolate_file_path',
204 help='.isolate file path to override the default '
205 'path')
206 AddDeviceOptions(parser)
207 AddCommonOptions(parser)
208 AddRemoteDeviceOptions(parser)
211 def AddLinkerTestOptions(parser):
212 group = parser.add_argument_group('Linker Test Options')
213 group.add_argument('-f', '--gtest-filter', dest='test_filter',
214 help='googletest-style filter string.')
215 AddCommonOptions(parser)
216 AddDeviceOptions(parser)
219 def AddJavaTestOptions(argument_group):
220 """Adds the Java test options to |option_parser|."""
222 argument_group.add_argument(
223 '-f', '--test-filter', dest='test_filter',
224 help=('Test filter (if not fully qualified, will run all matches).'))
225 argument_group.add_argument(
226 '-A', '--annotation', dest='annotation_str',
227 help=('Comma-separated list of annotations. Run only tests with any of '
228 'the given annotations. An annotation can be either a key or a '
229 'key-values pair. A test that has no annotation is considered '
230 '"SmallTest".'))
231 argument_group.add_argument(
232 '-E', '--exclude-annotation', dest='exclude_annotation_str',
233 help=('Comma-separated list of annotations. Exclude tests with these '
234 'annotations.'))
235 argument_group.add_argument(
236 '--screenshot', dest='screenshot_failures', action='store_true',
237 help='Capture screenshots of test failures')
238 argument_group.add_argument(
239 '--save-perf-json', action='store_true',
240 help='Saves the JSON file for each UI Perf test.')
241 argument_group.add_argument(
242 '--official-build', action='store_true', help='Run official build tests.')
243 argument_group.add_argument(
244 '--test_data', '--test-data', action='append', default=[],
245 help=('Each instance defines a directory of test data that should be '
246 'copied to the target(s) before running the tests. The argument '
247 'should be of the form <target>:<source>, <target> is relative to '
248 'the device data directory, and <source> is relative to the '
249 'chromium build directory.'))
252 def ProcessJavaTestOptions(args):
253 """Processes options/arguments and populates |options| with defaults."""
255 # TODO(jbudorick): Handle most of this function in argparse.
256 if args.annotation_str:
257 args.annotations = args.annotation_str.split(',')
258 elif args.test_filter:
259 args.annotations = []
260 else:
261 args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
262 'EnormousTest', 'IntegrationTest']
264 if args.exclude_annotation_str:
265 args.exclude_annotations = args.exclude_annotation_str.split(',')
266 else:
267 args.exclude_annotations = []
270 def AddInstrumentationTestOptions(parser):
271 """Adds Instrumentation test options to |parser|."""
273 parser.usage = '%(prog)s [options]'
275 group = parser.add_argument_group('Instrumentation Test Options')
276 AddJavaTestOptions(group)
278 java_or_python_group = group.add_mutually_exclusive_group()
279 java_or_python_group.add_argument(
280 '-j', '--java-only', action='store_false',
281 dest='run_python_tests', default=True, help='Run only the Java tests.')
282 java_or_python_group.add_argument(
283 '-p', '--python-only', action='store_false',
284 dest='run_java_tests', default=True,
285 help='Run only the host-driven tests.')
287 group.add_argument('--host-driven-root',
288 help='Root of the host-driven tests.')
289 group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
290 action='store_true',
291 help='Wait for debugger.')
292 group.add_argument('--test-apk', dest='test_apk', required=True,
293 help=('The name of the apk containing the tests '
294 '(without the .apk extension; '
295 'e.g. "ContentShellTest").'))
296 group.add_argument('--coverage-dir',
297 help=('Directory in which to place all generated '
298 'EMMA coverage files.'))
299 group.add_argument('--device-flags', dest='device_flags', default='',
300 help='The relative filepath to a file containing '
301 'command-line flags to set on the device')
302 group.add_argument('--isolate_file_path',
303 '--isolate-file-path',
304 dest='isolate_file_path',
305 help='.isolate file path to override the default '
306 'path')
308 AddCommonOptions(parser)
309 AddDeviceOptions(parser)
312 def ProcessInstrumentationOptions(args):
313 """Processes options/arguments and populate |options| with defaults.
315 Args:
316 args: argparse.Namespace object.
318 Returns:
319 An InstrumentationOptions named tuple which contains all options relevant to
320 instrumentation tests.
323 ProcessJavaTestOptions(args)
325 if not args.host_driven_root:
326 args.run_python_tests = False
328 args.test_apk_path = os.path.join(
329 constants.GetOutDirectory(),
330 constants.SDK_BUILD_APKS_DIR,
331 '%s.apk' % args.test_apk)
332 args.test_apk_jar_path = os.path.join(
333 constants.GetOutDirectory(),
334 constants.SDK_BUILD_TEST_JAVALIB_DIR,
335 '%s.jar' % args.test_apk)
336 args.test_support_apk_path = '%sSupport%s' % (
337 os.path.splitext(args.test_apk_path))
339 args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path)
341 # TODO(jbudorick): Get rid of InstrumentationOptions.
342 return instrumentation_test_options.InstrumentationOptions(
343 args.tool,
344 args.cleanup_test_files,
345 args.annotations,
346 args.exclude_annotations,
347 args.test_filter,
348 args.test_data,
349 args.save_perf_json,
350 args.screenshot_failures,
351 args.wait_for_debugger,
352 args.coverage_dir,
353 args.test_apk,
354 args.test_apk_path,
355 args.test_apk_jar_path,
356 args.test_runner,
357 args.test_support_apk_path,
358 args.device_flags,
359 args.isolate_file_path
363 def AddUIAutomatorTestOptions(parser):
364 """Adds UI Automator test options to |parser|."""
366 group = parser.add_argument_group('UIAutomator Test Options')
367 AddJavaTestOptions(group)
368 group.add_argument(
369 '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
370 metavar='PACKAGE', help='Package under test.')
371 group.add_argument(
372 '--test-jar', dest='test_jar', required=True,
373 help=('The name of the dexed jar containing the tests (without the '
374 '.dex.jar extension). Alternatively, this can be a full path '
375 'to the jar.'))
377 AddCommonOptions(parser)
378 AddDeviceOptions(parser)
381 def ProcessUIAutomatorOptions(args):
382 """Processes UIAutomator options/arguments.
384 Args:
385 args: argparse.Namespace object.
387 Returns:
388 A UIAutomatorOptions named tuple which contains all options relevant to
389 uiautomator tests.
392 ProcessJavaTestOptions(args)
394 if os.path.exists(args.test_jar):
395 # The dexed JAR is fully qualified, assume the info JAR lives along side.
396 args.uiautomator_jar = args.test_jar
397 else:
398 args.uiautomator_jar = os.path.join(
399 constants.GetOutDirectory(),
400 constants.SDK_BUILD_JAVALIB_DIR,
401 '%s.dex.jar' % args.test_jar)
402 args.uiautomator_info_jar = (
403 args.uiautomator_jar[:args.uiautomator_jar.find('.dex.jar')] +
404 '_java.jar')
406 return uiautomator_test_options.UIAutomatorOptions(
407 args.tool,
408 args.cleanup_test_files,
409 args.annotations,
410 args.exclude_annotations,
411 args.test_filter,
412 args.test_data,
413 args.save_perf_json,
414 args.screenshot_failures,
415 args.uiautomator_jar,
416 args.uiautomator_info_jar,
417 args.package)
420 def AddJUnitTestOptions(parser):
421 """Adds junit test options to |parser|."""
423 group = parser.add_argument_group('JUnit Test Options')
424 group.add_argument(
425 '-s', '--test-suite', dest='test_suite', required=True,
426 help=('JUnit test suite to run.'))
427 group.add_argument(
428 '-f', '--test-filter', dest='test_filter',
429 help='Filters tests googletest-style.')
430 group.add_argument(
431 '--package-filter', dest='package_filter',
432 help='Filters tests by package.')
433 group.add_argument(
434 '--runner-filter', dest='runner_filter',
435 help='Filters tests by runner class. Must be fully qualified.')
436 group.add_argument(
437 '--sdk-version', dest='sdk_version', type=int,
438 help='The Android SDK version.')
439 AddCommonOptions(parser)
442 def AddMonkeyTestOptions(parser):
443 """Adds monkey test options to |parser|."""
445 group = parser.add_argument_group('Monkey Test Options')
446 group.add_argument(
447 '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
448 metavar='PACKAGE', help='Package under test.')
449 group.add_argument(
450 '--event-count', default=10000, type=int,
451 help='Number of events to generate (default: %(default)s).')
452 group.add_argument(
453 '--category', default='',
454 help='A list of allowed categories.')
455 group.add_argument(
456 '--throttle', default=100, type=int,
457 help='Delay between events (ms) (default: %(default)s). ')
458 group.add_argument(
459 '--seed', type=int,
460 help=('Seed value for pseudo-random generator. Same seed value generates '
461 'the same sequence of events. Seed is randomized by default.'))
462 group.add_argument(
463 '--extra-args', default='',
464 help=('String of other args to pass to the command verbatim.'))
466 AddCommonOptions(parser)
467 AddDeviceOptions(parser)
469 def ProcessMonkeyTestOptions(args):
470 """Processes all monkey test options.
472 Args:
473 args: argparse.Namespace object.
475 Returns:
476 A MonkeyOptions named tuple which contains all options relevant to
477 monkey tests.
479 # TODO(jbudorick): Handle this directly in argparse with nargs='+'
480 category = args.category
481 if category:
482 category = args.category.split(',')
484 # TODO(jbudorick): Get rid of MonkeyOptions.
485 return monkey_test_options.MonkeyOptions(
486 args.verbose_count,
487 args.package,
488 args.event_count,
489 category,
490 args.throttle,
491 args.seed,
492 args.extra_args)
494 def AddUirobotTestOptions(parser):
495 """Adds uirobot test options to |option_parser|."""
496 group = parser.add_argument_group('Uirobot Test Options')
498 group.add_argument(
499 '--minutes', default=5, type=int,
500 help='Number of minutes to run uirobot test [default: %default].')
502 AddCommonOptions(parser)
503 AddDeviceOptions(parser)
504 AddRemoteDeviceOptions(parser)
506 def AddPerfTestOptions(parser):
507 """Adds perf test options to |parser|."""
509 group = parser.add_argument_group('Perf Test Options')
511 class SingleStepAction(argparse.Action):
512 def __call__(self, parser, namespace, values, option_string=None):
513 if values and not namespace.single_step:
514 parser.error('single step command provided, '
515 'but --single-step not specified.')
516 elif namespace.single_step and not values:
517 parser.error('--single-step specified, '
518 'but no single step command provided.')
519 setattr(namespace, self.dest, values)
521 step_group = group.add_mutually_exclusive_group(required=True)
522 # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
523 # This requires removing "--" from client calls.
524 step_group.add_argument(
525 '--single-step', action='store_true',
526 help='Execute the given command with retries, but only print the result '
527 'for the "most successful" round.')
528 step_group.add_argument(
529 '--steps',
530 help='JSON file containing the list of commands to run.')
531 step_group.add_argument(
532 '--print-step',
533 help='The name of a previously executed perf step to print.')
535 group.add_argument(
536 '--output-json-list',
537 help='Write a simple list of names from --steps into the given file.')
538 group.add_argument(
539 '--collect-chartjson-data',
540 action='store_true',
541 help='Cache the chartjson output from each step for later use.')
542 group.add_argument(
543 '--output-chartjson-data',
544 default='',
545 help='Write out chartjson into the given file.')
546 group.add_argument(
547 '--flaky-steps',
548 help=('A JSON file containing steps that are flaky '
549 'and will have its exit code ignored.'))
550 group.add_argument(
551 '--no-timeout', action='store_true',
552 help=('Do not impose a timeout. Each perf step is responsible for '
553 'implementing the timeout logic.'))
554 group.add_argument(
555 '-f', '--test-filter',
556 help=('Test filter (will match against the names listed in --steps).'))
557 group.add_argument(
558 '--dry-run', action='store_true',
559 help='Just print the steps without executing.')
560 group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
561 help='If --single-step is specified, the command to run.')
562 AddCommonOptions(parser)
563 AddDeviceOptions(parser)
566 def ProcessPerfTestOptions(args):
567 """Processes all perf test options.
569 Args:
570 args: argparse.Namespace object.
572 Returns:
573 A PerfOptions named tuple which contains all options relevant to
574 perf tests.
576 # TODO(jbudorick): Move single_step handling down into the perf tests.
577 if args.single_step:
578 args.single_step = ' '.join(args.single_step_command)
579 # TODO(jbudorick): Get rid of PerfOptions.
580 return perf_test_options.PerfOptions(
581 args.steps, args.flaky_steps, args.output_json_list,
582 args.print_step, args.no_timeout, args.test_filter,
583 args.dry_run, args.single_step, args.collect_chartjson_data,
584 args.output_chartjson_data)
587 def AddPythonTestOptions(parser):
588 group = parser.add_argument_group('Python Test Options')
589 group.add_argument(
590 '-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
591 choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
592 help='Name of the test suite to run.')
593 AddCommonOptions(parser)
596 def _RunGTests(args, devices):
597 """Subcommand of RunTestsCommands which runs gtests."""
598 exit_code = 0
599 for suite_name in args.suite_name:
600 # TODO(jbudorick): Either deprecate multi-suite or move its handling down
601 # into the gtest code.
602 gtest_options = gtest_test_options.GTestOptions(
603 args.tool,
604 args.cleanup_test_files,
605 args.test_filter,
606 args.run_disabled,
607 args.test_arguments,
608 args.timeout,
609 args.isolate_file_path,
610 suite_name)
611 runner_factory, tests = gtest_setup.Setup(gtest_options, devices)
613 results, test_exit_code = test_dispatcher.RunTests(
614 tests, runner_factory, devices, shard=True, test_timeout=None,
615 num_retries=args.num_retries)
617 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
618 exit_code = test_exit_code
620 report_results.LogFull(
621 results=results,
622 test_type='Unit test',
623 test_package=suite_name,
624 flakiness_server=args.flakiness_dashboard_server)
626 if args.json_results_file:
627 json_results.GenerateJsonResultsFile(results, args.json_results_file)
629 if os.path.isdir(constants.ISOLATE_DEPS_DIR):
630 shutil.rmtree(constants.ISOLATE_DEPS_DIR)
632 return exit_code
635 def _RunLinkerTests(args, devices):
636 """Subcommand of RunTestsCommands which runs linker tests."""
637 runner_factory, tests = linker_setup.Setup(args, devices)
639 results, exit_code = test_dispatcher.RunTests(
640 tests, runner_factory, devices, shard=True, test_timeout=60,
641 num_retries=args.num_retries)
643 report_results.LogFull(
644 results=results,
645 test_type='Linker test',
646 test_package='ChromiumLinkerTest')
648 if args.json_results_file:
649 json_results.GenerateJsonResultsFile(results, args.json_results_file)
651 return exit_code
654 def _RunInstrumentationTests(args, devices):
655 """Subcommand of RunTestsCommands which runs instrumentation tests."""
656 logging.info('_RunInstrumentationTests(%s, %s)' % (str(args), str(devices)))
658 instrumentation_options = ProcessInstrumentationOptions(args)
660 if len(devices) > 1 and args.wait_for_debugger:
661 logging.warning('Debugger can not be sharded, using first available device')
662 devices = devices[:1]
664 results = base_test_result.TestRunResults()
665 exit_code = 0
667 if args.run_java_tests:
668 runner_factory, tests = instrumentation_setup.Setup(
669 instrumentation_options, devices)
671 test_results, exit_code = test_dispatcher.RunTests(
672 tests, runner_factory, devices, shard=True, test_timeout=None,
673 num_retries=args.num_retries)
675 results.AddTestRunResults(test_results)
677 if args.run_python_tests:
678 runner_factory, tests = host_driven_setup.InstrumentationSetup(
679 args.host_driven_root, args.official_build,
680 instrumentation_options)
682 if tests:
683 test_results, test_exit_code = test_dispatcher.RunTests(
684 tests, runner_factory, devices, shard=True, test_timeout=None,
685 num_retries=args.num_retries)
687 results.AddTestRunResults(test_results)
689 # Only allow exit code escalation
690 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
691 exit_code = test_exit_code
693 if args.device_flags:
694 args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
695 args.device_flags)
697 report_results.LogFull(
698 results=results,
699 test_type='Instrumentation',
700 test_package=os.path.basename(args.test_apk),
701 annotation=args.annotations,
702 flakiness_server=args.flakiness_dashboard_server)
704 if args.json_results_file:
705 json_results.GenerateJsonResultsFile(results, args.json_results_file)
707 return exit_code
710 def _RunUIAutomatorTests(args, devices):
711 """Subcommand of RunTestsCommands which runs uiautomator tests."""
712 uiautomator_options = ProcessUIAutomatorOptions(args)
714 runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)
716 results, exit_code = test_dispatcher.RunTests(
717 tests, runner_factory, devices, shard=True, test_timeout=None,
718 num_retries=args.num_retries)
720 report_results.LogFull(
721 results=results,
722 test_type='UIAutomator',
723 test_package=os.path.basename(args.test_jar),
724 annotation=args.annotations,
725 flakiness_server=args.flakiness_dashboard_server)
727 if args.json_results_file:
728 json_results.GenerateJsonResultsFile(results, args.json_results_file)
730 return exit_code
733 def _RunJUnitTests(args):
734 """Subcommand of RunTestsCommand which runs junit tests."""
735 runner_factory, tests = junit_setup.Setup(args)
736 _, exit_code = junit_dispatcher.RunTests(tests, runner_factory)
737 return exit_code
740 def _RunMonkeyTests(args, devices):
741 """Subcommand of RunTestsCommands which runs monkey tests."""
742 monkey_options = ProcessMonkeyTestOptions(args)
744 runner_factory, tests = monkey_setup.Setup(monkey_options)
746 results, exit_code = test_dispatcher.RunTests(
747 tests, runner_factory, devices, shard=False, test_timeout=None,
748 num_retries=args.num_retries)
750 report_results.LogFull(
751 results=results,
752 test_type='Monkey',
753 test_package='Monkey')
755 if args.json_results_file:
756 json_results.GenerateJsonResultsFile(results, args.json_results_file)
758 return exit_code
761 def _RunPerfTests(args):
762 """Subcommand of RunTestsCommands which runs perf tests."""
763 perf_options = ProcessPerfTestOptions(args)
765 # Just save a simple json with a list of test names.
766 if perf_options.output_json_list:
767 return perf_test_runner.OutputJsonList(
768 perf_options.steps, perf_options.output_json_list)
770 if perf_options.output_chartjson_data:
771 return perf_test_runner.OutputChartjson(
772 perf_options.print_step, perf_options.output_chartjson_data)
774 # Just print the results from a single previously executed step.
775 if perf_options.print_step:
776 return perf_test_runner.PrintTestOutput(perf_options.print_step)
778 runner_factory, tests, devices = perf_setup.Setup(perf_options)
780 # shard=False means that each device will get the full list of tests
781 # and then each one will decide their own affinity.
782 # shard=True means each device will pop the next test available from a queue,
783 # which increases throughput but have no affinity.
784 results, _ = test_dispatcher.RunTests(
785 tests, runner_factory, devices, shard=False, test_timeout=None,
786 num_retries=args.num_retries)
788 report_results.LogFull(
789 results=results,
790 test_type='Perf',
791 test_package='Perf')
793 if args.json_results_file:
794 json_results.GenerateJsonResultsFile(results, args.json_results_file)
796 if perf_options.single_step:
797 return perf_test_runner.PrintTestOutput('single_step')
799 perf_test_runner.PrintSummary(tests)
801 # Always return 0 on the sharding stage. Individual tests exit_code
802 # will be returned on the print_step stage.
803 return 0
806 def _RunPythonTests(args):
807 """Subcommand of RunTestsCommand which runs python unit tests."""
808 suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
809 suite_path = suite_vars['path']
810 suite_test_modules = suite_vars['test_modules']
812 sys.path = [suite_path] + sys.path
813 try:
814 suite = unittest.TestSuite()
815 suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
816 for m in suite_test_modules)
817 runner = unittest.TextTestRunner(verbosity=1+args.verbose_count)
818 return 0 if runner.run(suite).wasSuccessful() else 1
819 finally:
820 sys.path = sys.path[1:]
823 def _GetAttachedDevices(test_device=None):
824 """Get all attached devices.
826 Args:
827 test_device: Name of a specific device to use.
829 Returns:
830 A list of attached devices.
832 attached_devices = []
834 attached_devices = android_commands.GetAttachedDevices()
835 if test_device:
836 assert test_device in attached_devices, (
837 'Did not find device %s among attached device. Attached devices: %s'
838 % (test_device, ', '.join(attached_devices)))
839 attached_devices = [test_device]
841 assert attached_devices, 'No devices attached.'
843 return sorted(attached_devices)
846 def RunTestsCommand(args, parser):
847 """Checks test type and dispatches to the appropriate function.
849 Args:
850 args: argparse.Namespace object.
851 parser: argparse.ArgumentParser object.
853 Returns:
854 Integer indicated exit code.
856 Raises:
857 Exception: Unknown command name passed in, or an exception from an
858 individual test runner.
860 command = args.command
862 ProcessCommonOptions(args)
864 if args.enable_platform_mode:
865 return RunTestsInPlatformMode(args, parser)
867 if command in constants.LOCAL_MACHINE_TESTS:
868 devices = []
869 else:
870 devices = _GetAttachedDevices(args.test_device)
872 forwarder.Forwarder.RemoveHostLog()
873 if not ports.ResetTestServerPortAllocation():
874 raise Exception('Failed to reset test server port.')
876 if command == 'gtest':
877 return _RunGTests(args, devices)
878 elif command == 'linker':
879 return _RunLinkerTests(args, devices)
880 elif command == 'instrumentation':
881 return _RunInstrumentationTests(args, devices)
882 elif command == 'uiautomator':
883 return _RunUIAutomatorTests(args, devices)
884 elif command == 'junit':
885 return _RunJUnitTests(args)
886 elif command == 'monkey':
887 return _RunMonkeyTests(args, devices)
888 elif command == 'perf':
889 return _RunPerfTests(args)
890 elif command == 'python':
891 return _RunPythonTests(args)
892 else:
893 raise Exception('Unknown test type.')
896 _SUPPORTED_IN_PLATFORM_MODE = [
897 # TODO(jbudorick): Add support for more test types.
898 'gtest', 'uirobot',
902 def RunTestsInPlatformMode(args, parser):
904 if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
905 parser.error('%s is not yet supported in platform mode' % args.command)
907 with environment_factory.CreateEnvironment(args, parser.error) as env:
908 with test_instance_factory.CreateTestInstance(args, parser.error) as test:
909 with test_run_factory.CreateTestRun(
910 args, env, test, parser.error) as test_run:
911 results = test_run.RunTests()
913 if args.trigger:
914 return 0 # Not returning results, only triggering.
916 report_results.LogFull(
917 results=results,
918 test_type=test.TestType(),
919 test_package=test_run.TestPackage(),
920 annotation=getattr(args, 'annotations', None),
921 flakiness_server=getattr(args, 'flakiness_dashboard_server', None))
923 if args.json_results_file:
924 json_results.GenerateJsonResultsFile(
925 results, args.json_results_file)
927 return 0 if results.DidRunPass() else 1
930 CommandConfigTuple = collections.namedtuple(
931 'CommandConfigTuple',
932 ['add_options_func', 'help_txt'])
933 VALID_COMMANDS = {
934 'gtest': CommandConfigTuple(
935 AddGTestOptions,
936 'googletest-based C++ tests'),
937 'instrumentation': CommandConfigTuple(
938 AddInstrumentationTestOptions,
939 'InstrumentationTestCase-based Java tests'),
940 'uiautomator': CommandConfigTuple(
941 AddUIAutomatorTestOptions,
942 "Tests that run via Android's uiautomator command"),
943 'junit': CommandConfigTuple(
944 AddJUnitTestOptions,
945 'JUnit4-based Java tests'),
946 'monkey': CommandConfigTuple(
947 AddMonkeyTestOptions,
948 "Tests based on Android's monkey"),
949 'perf': CommandConfigTuple(
950 AddPerfTestOptions,
951 'Performance tests'),
952 'python': CommandConfigTuple(
953 AddPythonTestOptions,
954 'Python tests based on unittest.TestCase'),
955 'linker': CommandConfigTuple(
956 AddLinkerTestOptions,
957 'Linker tests'),
958 'uirobot': CommandConfigTuple(
959 AddUirobotTestOptions,
960 'Uirobot test'),
964 def DumpThreadStacks(_signal, _frame):
965 for thread in threading.enumerate():
966 reraiser_thread.LogThreadStack(thread)
969 def main():
970 signal.signal(signal.SIGUSR1, DumpThreadStacks)
972 parser = argparse.ArgumentParser()
973 command_parsers = parser.add_subparsers(title='test types',
974 dest='command')
976 for test_type, config in sorted(VALID_COMMANDS.iteritems(),
977 key=lambda x: x[0]):
978 subparser = command_parsers.add_parser(
979 test_type, usage='%(prog)s [options]', help=config.help_txt)
980 config.add_options_func(subparser)
982 args = parser.parse_args()
983 return RunTestsCommand(args, parser)
986 if __name__ == '__main__':
987 sys.exit(main())