#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all types of tests from one unified interface."""

import argparse
import collections
import logging
import os
import shutil
import signal
import sys
import threading
import unittest

from pylib import android_commands
from pylib import constants
from pylib import forwarder
from pylib import ports
from pylib.base import base_test_result
from pylib.base import environment_factory
from pylib.base import test_dispatcher
from pylib.base import test_instance_factory
from pylib.base import test_run_factory
from pylib.gtest import gtest_config
from pylib.gtest import setup as gtest_setup
from pylib.gtest import test_options as gtest_test_options
from pylib.linker import setup as linker_setup
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.junit import setup as junit_setup
from pylib.junit import test_dispatcher as junit_dispatcher
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.results import json_results
from pylib.results import report_results
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options
from pylib.utils import apk_helper
from pylib.utils import base_error
from pylib.utils import reraiser_thread
from pylib.utils import run_tests_helper


def AddCommonOptions(parser):
  """Adds all common options to |parser|."""

  group = parser.add_argument_group('Common Options')

  default_build_type = os.environ.get('BUILDTYPE', 'Debug')

  debug_or_release_group = group.add_mutually_exclusive_group()
  debug_or_release_group.add_argument(
      '--debug', action='store_const', const='Debug', dest='build_type',
      default=default_build_type,
      help=('If set, run test suites under out/Debug. '
            'Default is env var BUILDTYPE or Debug.'))
  debug_or_release_group.add_argument(
      '--release', action='store_const', const='Release', dest='build_type',
      help=('If set, run test suites under out/Release. '
            'Default is env var BUILDTYPE or Debug.'))

  group.add_argument('--build-directory', dest='build_directory',
                     help=('Path to the directory in which build files are'
                           ' located (should not include build type)'))
  group.add_argument('--output-directory', dest='output_directory',
                     help=('Path to the directory in which build files are'
                           ' located (must include build type). This will take'
                           ' precedence over --debug, --release and'
                           ' --build-directory'))
  group.add_argument('--num_retries', dest='num_retries', type=int, default=2,
                     help=('Number of retries for a test before '
                           'giving up (default: %(default)s).'))
  group.add_argument('-v',
                     '--verbose',
                     dest='verbose_count',
                     default=0,
                     action='count',
                     help='Verbose level (multiple times for more)')
  group.add_argument('--flakiness-dashboard-server',
                     dest='flakiness_dashboard_server',
                     help=('Address of the server that is hosting the '
                           'Chrome for Android flakiness dashboard.'))
  group.add_argument('--enable-platform-mode', action='store_true',
                     help=('Run the test scripts in platform mode, which '
                           'conceptually separates the test runner from the '
                           '"device" (local or remote, real or emulated) on '
                           'which the tests are running. [experimental]'))
  group.add_argument('-e', '--environment', default='local',
                     choices=constants.VALID_ENVIRONMENTS,
                     help='Test environment to run in (default: %(default)s).')
  group.add_argument('--adb-path',
                     help=('Specify the absolute path of the adb binary that '
                           'should be used.'))
  group.add_argument('--json-results-file', dest='json_results_file',
                     help='If set, will dump results in JSON form '
                          'to specified file.')


def ProcessCommonOptions(args):
  """Processes and handles all common options."""
  run_tests_helper.SetLogLevel(args.verbose_count)
  constants.SetBuildType(args.build_type)
  if args.build_directory:
    constants.SetBuildDirectory(args.build_directory)
  if args.output_directory:
    constants.SetOutputDirectory(args.output_directory)
  if args.adb_path:
    constants.SetAdbPath(args.adb_path)
  # Some things such as Forwarder require ADB to be in the environment path.
  adb_dir = os.path.dirname(constants.GetAdbPath())
  if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
    os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']


def AddRemoteDeviceOptions(parser):
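  """Adds remote-device options to |parser|."""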
  group = parser.add_argument_group('Remote Device Options')

  group.add_argument('--trigger',
                     help=('Only triggers the test if set. Stores test_run_id '
                           'in the given file path.'))
  group.add_argument('--collect',
                     help=('Only collects the test results if set. '
                           'Gets test_run_id from the given file path.'))
  group.add_argument('--remote-device', action='append',
                     help='Device type to run test on.')
  group.add_argument('--results-path',
                     help='File path to download results to.')
  group.add_argument('--api-protocol',
                     help='HTTP protocol to use. (http or https)')
  group.add_argument('--api-address',
                     help='Address to send HTTP requests.')
  group.add_argument('--api-port',
                     help='Port to send HTTP requests to.')
  group.add_argument('--runner-type',
                     help='Type of test to run as.')
  group.add_argument('--runner-package',
                     help='Package name of test.')
  group.add_argument('--device-type',
                     choices=constants.VALID_DEVICE_TYPES,
                     help='Type of device to run on (iOS or Android).')
  group.add_argument('--device-oem', action='append',
                     help='Device OEM to run on.')
  group.add_argument('--remote-device-file',
                     help=('File with JSON to select remote device. '
                           'Overrides all other flags.'))
  group.add_argument('--remote-device-timeout', type=int,
                     help='Number of times to retry finding the remote device.')

  device_os_group = group.add_mutually_exclusive_group()
  device_os_group.add_argument('--remote-device-minimum-os',
                               help='Minimum OS on device.')
  device_os_group.add_argument('--remote-device-os', action='append',
                               help='OS to have on the device.')

  api_secret_group = group.add_mutually_exclusive_group()
  api_secret_group.add_argument('--api-secret', default='',
                                help='API secret for remote devices.')
  api_secret_group.add_argument('--api-secret-file', default='',
                                help='Path to file that contains API secret.')

  api_key_group = group.add_mutually_exclusive_group()
  api_key_group.add_argument('--api-key', default='',
                             help='API key for remote devices.')
  api_key_group.add_argument('--api-key-file', default='',
                             help='Path to file that contains API key.')


def AddDeviceOptions(parser):
  """Adds device options to |parser|."""
  group = parser.add_argument_group(title='Device Options')
  group.add_argument('-c', dest='cleanup_test_files',
                     help='Clean up test files on the device after the run.',
                     action='store_true')
  group.add_argument('--tool',
                     dest='tool',
                     help=('Run the test under a tool '
                           '(use --tool help to list them)'))
  group.add_argument('-d', '--device', dest='test_device',
                     help=('Target device for the test suite '
                           'to run on.'))


def AddGTestOptions(parser):
  """Adds gtest options to |parser|."""

  gtest_suites = list(gtest_config.STABLE_TEST_SUITES
                      + gtest_config.EXPERIMENTAL_TEST_SUITES)

  group = parser.add_argument_group('GTest Options')
  group.add_argument('-s', '--suite', dest='suite_name',
                     nargs='+', metavar='SUITE_NAME', required=True,
                     help=('Executable name of the test suite to run. '
                           'Available suites include (but are not limited to): '
                           '%s' % ', '.join('"%s"' % s for s in gtest_suites)))
  group.add_argument('--gtest_also_run_disabled_tests',
                     '--gtest-also-run-disabled-tests',
                     dest='run_disabled', action='store_true',
                     help='Also run disabled tests if applicable.')
  group.add_argument('-a', '--test-arguments', dest='test_arguments',
                     default='',
                     help='Additional arguments to pass to the test.')
  group.add_argument('-t', dest='timeout', type=int, default=60,
                     help='Timeout to wait for each test '
                          '(default: %(default)s).')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')

  filter_group = group.add_mutually_exclusive_group()
  filter_group.add_argument('-f', '--gtest_filter', '--gtest-filter',
                            dest='test_filter',
                            help='googletest-style filter string.')
  filter_group.add_argument('--gtest-filter-file', dest='test_filter_file',
                            help='Path to file that contains googletest-style '
                                 'filter strings. (Lines will be joined with '
                                 '":" to create a single filter string.)')

  AddDeviceOptions(parser)
  AddCommonOptions(parser)
  AddRemoteDeviceOptions(parser)


def AddLinkerTestOptions(parser):
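  """Adds linker test options to |parser|."""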
  group = parser.add_argument_group('Linker Test Options')
  group.add_argument('-f', '--gtest-filter', dest='test_filter',
                     help='googletest-style filter string.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def AddJavaTestOptions(argument_group):
  """Adds the Java test options to |argument_group|."""

  argument_group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help=('Test filter (if not fully qualified, will run all matches).'))
  argument_group.add_argument(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  argument_group.add_argument(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  argument_group.add_argument(
      '--screenshot', dest='screenshot_failures', action='store_true',
      help='Capture screenshots of test failures')
  argument_group.add_argument(
      '--save-perf-json', action='store_true',
      help='Saves the JSON file for each UI Perf test.')
  argument_group.add_argument(
      '--official-build', action='store_true', help='Run official build tests.')
  argument_group.add_argument(
      '--test_data', '--test-data', action='append', default=[],
      help=('Each instance defines a directory of test data that should be '
            'copied to the target(s) before running the tests. The argument '
            'should be of the form <target>:<source>, where <target> is '
            'relative to the device data directory and <source> is relative '
            'to the chromium build directory.'))
  argument_group.add_argument(
      '--disable-dalvik-asserts', dest='set_asserts', action='store_false',
      default=True, help='Removes the dalvik.vm.enableassertions property')


def ProcessJavaTestOptions(args):
  """Processes options/arguments and populates |options| with defaults."""

  # TODO(jbudorick): Handle most of this function in argparse.
  if args.annotation_str:
    args.annotations = args.annotation_str.split(',')
  elif args.test_filter:
    args.annotations = []
  else:
    args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                        'EnormousTest', 'IntegrationTest']

  if args.exclude_annotation_str:
    args.exclude_annotations = args.exclude_annotation_str.split(',')
  else:
    args.exclude_annotations = []


def AddInstrumentationTestOptions(parser):
  """Adds Instrumentation test options to |parser|."""

  parser.usage = '%(prog)s [options]'

  group = parser.add_argument_group('Instrumentation Test Options')
  AddJavaTestOptions(group)

  java_or_python_group = group.add_mutually_exclusive_group()
  java_or_python_group.add_argument(
      '-j', '--java-only', action='store_false',
      dest='run_python_tests', default=True, help='Run only the Java tests.')
  java_or_python_group.add_argument(
      '-p', '--python-only', action='store_false',
      dest='run_java_tests', default=True,
      help='Run only the host-driven tests.')

  group.add_argument('--host-driven-root',
                     help='Root of the host-driven tests.')
  group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
                     action='store_true',
                     help='Wait for debugger.')
  group.add_argument('--apk-under-test', dest='apk_under_test',
                     help='The name of the apk under test.')
  group.add_argument('--test-apk', dest='test_apk', required=True,
                     help=('The name of the apk containing the tests '
                           '(without the .apk extension; '
                           'e.g. "ContentShellTest").'))
  group.add_argument('--coverage-dir',
                     help=('Directory in which to place all generated '
                           'EMMA coverage files.'))
  group.add_argument('--device-flags', dest='device_flags', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--device-flags-file', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)


def ProcessInstrumentationOptions(args):
  """Processes options/arguments and populates |options| with defaults.

  Args:
    args: argparse.Namespace object.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant
    to instrumentation tests.
  """

  ProcessJavaTestOptions(args)

  if not args.host_driven_root:
    args.run_python_tests = False

  args.test_apk_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_APKS_DIR,
      '%s.apk' % args.test_apk)
  args.test_apk_jar_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_TEST_JAVALIB_DIR,
      '%s.jar' % args.test_apk)
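  # The support APK path is derived by splitting the test APK's extension,
  # e.g. ".../Foo.apk" becomes ".../FooSupport.apk".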
  args.test_support_apk_path = '%sSupport%s' % (
      os.path.splitext(args.test_apk_path))

  args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path)

  # TODO(jbudorick): Get rid of InstrumentationOptions.
  return instrumentation_test_options.InstrumentationOptions(
      args.tool,
      args.cleanup_test_files,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.wait_for_debugger,
      args.coverage_dir,
      args.test_apk,
      args.test_apk_path,
      args.test_apk_jar_path,
      args.test_runner,
      args.test_support_apk_path,
      args.device_flags,
      args.isolate_file_path,
      args.set_asserts)


def AddUIAutomatorTestOptions(parser):
  """Adds UI Automator test options to |parser|."""

  group = parser.add_argument_group('UIAutomator Test Options')
  AddJavaTestOptions(group)
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--test-jar', dest='test_jar', required=True,
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))

  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessUIAutomatorOptions(args):
  """Processes UIAutomator options/arguments.

  Args:
    args: argparse.Namespace object.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """

  ProcessJavaTestOptions(args)
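
  # |test_jar| may be a full path to an existing dexed jar; otherwise it is
  # resolved against the build output as <name>.dex.jar. Either way, the info
  # jar is the dexed jar's path with ".dex.jar" replaced by "_java.jar".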
  if os.path.exists(args.test_jar):
    # The dexed JAR is fully qualified; assume the info JAR lives alongside.
    args.uiautomator_jar = args.test_jar
  else:
    args.uiautomator_jar = os.path.join(
        constants.GetOutDirectory(),
        constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % args.test_jar)
  args.uiautomator_info_jar = (
      args.uiautomator_jar[:args.uiautomator_jar.find('.dex.jar')] +
      '_java.jar')

  return uiautomator_test_options.UIAutomatorOptions(
      args.tool,
      args.cleanup_test_files,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.uiautomator_jar,
      args.uiautomator_info_jar,
      args.package,
      args.set_asserts)


def AddJUnitTestOptions(parser):
  """Adds junit test options to |parser|."""

  group = parser.add_argument_group('JUnit Test Options')
  group.add_argument(
      '-s', '--test-suite', dest='test_suite', required=True,
      help='JUnit test suite to run.')
  group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help='Filters tests googletest-style.')
  group.add_argument(
      '--package-filter', dest='package_filter',
      help='Filters tests by package.')
  group.add_argument(
      '--runner-filter', dest='runner_filter',
      help='Filters tests by runner class. Must be fully qualified.')
  group.add_argument(
      '--sdk-version', dest='sdk_version', type=int,
      help='The Android SDK version.')
  AddCommonOptions(parser)


def AddMonkeyTestOptions(parser):
  """Adds monkey test options to |parser|."""

  group = parser.add_argument_group('Monkey Test Options')
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--event-count', default=10000, type=int,
      help='Number of events to generate (default: %(default)s).')
  group.add_argument(
      '--category', default='',
      help='A comma-separated list of allowed categories.')
  group.add_argument(
      '--throttle', default=100, type=int,
      help='Delay between events (ms) (default: %(default)s).')
  group.add_argument(
      '--seed', type=int,
      help=('Seed value for pseudo-random generator. Same seed value generates '
            'the same sequence of events. Seed is randomized by default.'))
  group.add_argument(
      '--extra-args', default='',
      help='String of other args to pass to the command verbatim.')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessMonkeyTestOptions(args):
  """Processes all monkey test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  # TODO(jbudorick): Handle this directly in argparse with nargs='+'
  category = args.category
  if category:
    category = args.category.split(',')

  # TODO(jbudorick): Get rid of MonkeyOptions.
  return monkey_test_options.MonkeyOptions(
      args.verbose_count,
      args.package,
      args.event_count,
      category,
      args.throttle,
      args.seed,
      args.extra_args)


def AddUirobotTestOptions(parser):
  """Adds uirobot test options to |parser|."""
  group = parser.add_argument_group('Uirobot Test Options')

  group.add_argument('--app-under-test', required=True,
                     help='APK to run tests on.')
  group.add_argument(
      '--minutes', default=5, type=int,
      help='Number of minutes to run uirobot test [default: %(default)s].')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)


def AddPerfTestOptions(parser):
  """Adds perf test options to |parser|."""

  group = parser.add_argument_group('Perf Test Options')
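
  # Validates that a bare single-step command appears exactly when
  # --single-step is passed, and rejects either one on its own.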
  class SingleStepAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
      if values and not namespace.single_step:
        parser.error('single step command provided, '
                     'but --single-step not specified.')
      elif namespace.single_step and not values:
        parser.error('--single-step specified, '
                     'but no single step command provided.')
      setattr(namespace, self.dest, values)

  step_group = group.add_mutually_exclusive_group(required=True)
  # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
  # This requires removing "--" from client calls.
  step_group.add_argument(
      '--single-step', action='store_true',
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  step_group.add_argument(
      '--steps',
      help='JSON file containing the list of commands to run.')
  step_group.add_argument(
      '--print-step',
      help='The name of a previously executed perf step to print.')

  group.add_argument(
      '--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  group.add_argument(
      '--collect-chartjson-data',
      action='store_true',
      help='Cache the chartjson output from each step for later use.')
  group.add_argument(
      '--output-chartjson-data',
      default='',
      help='Write out chartjson into the given file.')
  group.add_argument(
      '--flaky-steps',
      help=('A JSON file containing steps that are flaky '
            'and will have their exit codes ignored.'))
  group.add_argument(
      '--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  group.add_argument(
      '-f', '--test-filter',
      help=('Test filter (will match against the names listed in --steps).'))
  group.add_argument(
      '--dry-run', action='store_true',
      help='Just print the steps without executing.')
  group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
                     help='If --single-step is specified, the command to run.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessPerfTestOptions(args):
  """Processes all perf test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # TODO(jbudorick): Move single_step handling down into the perf tests.
  if args.single_step:
    args.single_step = ' '.join(args.single_step_command)
  # TODO(jbudorick): Get rid of PerfOptions.
  return perf_test_options.PerfOptions(
      args.steps, args.flaky_steps, args.output_json_list,
      args.print_step, args.no_timeout, args.test_filter,
      args.dry_run, args.single_step, args.collect_chartjson_data,
      args.output_chartjson_data)


def AddPythonTestOptions(parser):
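  """Adds python test options to |parser|."""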
  group = parser.add_argument_group('Python Test Options')
  group.add_argument(
      '-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
      choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
      help='Name of the test suite to run.')
  AddCommonOptions(parser)


def _RunGTests(args, devices):
  """Subcommand of RunTestsCommand which runs gtests."""
  exit_code = 0
  for suite_name in args.suite_name:
    # TODO(jbudorick): Either deprecate multi-suite or move its handling down
    # into the gtest code.
    gtest_options = gtest_test_options.GTestOptions(
        args.tool,
        args.cleanup_test_files,
        args.test_filter,
        args.run_disabled,
        args.test_arguments,
        args.timeout,
        args.isolate_file_path,
        suite_name)
    runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

    results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite_name,
        flakiness_server=args.flakiness_dashboard_server)

    if args.json_results_file:
      json_results.GenerateJsonResultsFile(results, args.json_results_file)

  if os.path.isdir(constants.ISOLATE_DEPS_DIR):
    shutil.rmtree(constants.ISOLATE_DEPS_DIR)

  return exit_code


def _RunLinkerTests(args, devices):
  """Subcommand of RunTestsCommand which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(args, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunInstrumentationTests(args, devices):
  """Subcommand of RunTestsCommand which runs instrumentation tests."""
  logging.info('_RunInstrumentationTests(%s, %s)' % (str(args), str(devices)))

  instrumentation_options = ProcessInstrumentationOptions(args)

  if len(devices) > 1 and args.wait_for_debugger:
    logging.warning(
        'Debugger cannot be sharded; using first available device.')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if args.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(
        instrumentation_options, devices)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    results.AddTestRunResults(test_results)

  if args.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        args.host_driven_root, args.official_build,
        instrumentation_options)

    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=args.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation.
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  if args.device_flags:
    args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                     args.device_flags)

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(args.test_apk),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunUIAutomatorTests(args, devices):
  """Subcommand of RunTestsCommand which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(args)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(args.test_jar),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunJUnitTests(args):
  """Subcommand of RunTestsCommand which runs junit tests."""
  runner_factory, tests = junit_setup.Setup(args)
  _, exit_code = junit_dispatcher.RunTests(tests, runner_factory)
  return exit_code


def _RunMonkeyTests(args, devices):
  """Subcommand of RunTestsCommand which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(args)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunPerfTests(args):
  """Subcommand of RunTestsCommand which runs perf tests."""
  perf_options = ProcessPerfTestOptions(args)

  # Just save a simple json with a list of test names.
  if perf_options.output_json_list:
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(
        perf_options.print_step, perf_options.output_chartjson_data)

  runner_factory, tests, devices = perf_setup.Setup(perf_options)

  # shard=False means that each device gets the full list of tests and
  # decides its own affinity; shard=True means each device pops the next
  # available test from a queue, which increases throughput but loses
  # affinity.
  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage; individual tests' exit codes are
  # returned on the print_step stage.
  return 0


def _RunPythonTests(args):
  """Subcommand of RunTestsCommand which runs python unit tests."""
  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
  suite_path = suite_vars['path']
  suite_test_modules = suite_vars['test_modules']

  sys.path = [suite_path] + sys.path
  try:
    suite = unittest.TestSuite()
    suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
                   for m in suite_test_modules)
    runner = unittest.TextTestRunner(verbosity=1+args.verbose_count)
    return 0 if runner.run(suite).wasSuccessful() else 1
  finally:
    sys.path = sys.path[1:]


def _GetAttachedDevices(test_device=None):
  """Gets all attached devices.

  Args:
    test_device: Name of a specific device to use.

  Returns:
    A list of attached devices.
  """
  attached_devices = []

  attached_devices = android_commands.GetAttachedDevices()
  if test_device:
    assert test_device in attached_devices, (
        'Did not find device %s among attached devices. Attached devices: %s'
        % (test_device, ', '.join(attached_devices)))
    attached_devices = [test_device]

  assert attached_devices, 'No devices attached.'

  return sorted(attached_devices)


def RunTestsCommand(args, parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    args: argparse.Namespace object.
    parser: argparse.ArgumentParser object.

  Returns:
    Integer indicating exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
        individual test runner.
  """
  command = args.command

  ProcessCommonOptions(args)

  if args.enable_platform_mode:
    return RunTestsInPlatformMode(args, parser)

  if command in constants.LOCAL_MACHINE_TESTS:
    devices = []
  else:
    devices = _GetAttachedDevices(args.test_device)

  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if command == 'gtest':
    return _RunGTests(args, devices)
  elif command == 'linker':
    return _RunLinkerTests(args, devices)
  elif command == 'instrumentation':
    return _RunInstrumentationTests(args, devices)
  elif command == 'uiautomator':
    return _RunUIAutomatorTests(args, devices)
  elif command == 'junit':
    return _RunJUnitTests(args)
  elif command == 'monkey':
    return _RunMonkeyTests(args, devices)
  elif command == 'perf':
    return _RunPerfTests(args)
  elif command == 'python':
    return _RunPythonTests(args)
  else:
    raise Exception('Unknown test type.')


_SUPPORTED_IN_PLATFORM_MODE = [
    # TODO(jbudorick): Add support for more test types.
    'gtest',
    'instrumentation',
    'uirobot',
]


def RunTestsInPlatformMode(args, parser):
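  """Runs tests using the platform-mode factories.

  Args:
    args: argparse.Namespace object.
    parser: argparse.ArgumentParser object (used for error reporting).

  Returns:
    Integer indicating exit code.
  """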

  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    parser.error('%s is not yet supported in platform mode' % args.command)

  with environment_factory.CreateEnvironment(args, parser.error) as env:
    with test_instance_factory.CreateTestInstance(args, parser.error) as test:
      with test_run_factory.CreateTestRun(
          args, env, test, parser.error) as test_run:
        results = test_run.RunTests()

        if args.environment == 'remote_device' and args.trigger:
          return 0  # Not returning results, only triggering.

        report_results.LogFull(
            results=results,
            test_type=test.TestType(),
            test_package=test_run.TestPackage(),
            annotation=getattr(args, 'annotations', None),
            flakiness_server=getattr(args, 'flakiness_dashboard_server', None))

        if args.json_results_file:
          json_results.GenerateJsonResultsFile(
              results, args.json_results_file)

  return 0 if results.DidRunPass() else constants.ERROR_EXIT_CODE


CommandConfigTuple = collections.namedtuple(
    'CommandConfigTuple',
    ['add_options_func', 'help_txt'])

VALID_COMMANDS = {
    'gtest': CommandConfigTuple(
        AddGTestOptions,
        'googletest-based C++ tests'),
    'instrumentation': CommandConfigTuple(
        AddInstrumentationTestOptions,
        'InstrumentationTestCase-based Java tests'),
    'uiautomator': CommandConfigTuple(
        AddUIAutomatorTestOptions,
        "Tests that run via Android's uiautomator command"),
    'junit': CommandConfigTuple(
        AddJUnitTestOptions,
        'JUnit4-based Java tests'),
    'monkey': CommandConfigTuple(
        AddMonkeyTestOptions,
        "Tests based on Android's monkey"),
    'perf': CommandConfigTuple(
        AddPerfTestOptions,
        'Performance tests'),
    'python': CommandConfigTuple(
        AddPythonTestOptions,
        'Python tests based on unittest.TestCase'),
    'linker': CommandConfigTuple(
        AddLinkerTestOptions,
        'Linker tests'),
    'uirobot': CommandConfigTuple(
        AddUirobotTestOptions,
        'Uirobot test'),
}
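
# Adding a new test type is a matter of registering it above; a hypothetical
# entry would look like:
#   'mytest': CommandConfigTuple(AddMyTestOptions, 'My tests')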


def DumpThreadStacks(_signal, _frame):
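  """Signal handler that logs the stack of every live thread."""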
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)


def main():
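  """Parses arguments and dispatches to the selected test-type command."""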
  signal.signal(signal.SIGUSR1, DumpThreadStacks)

  parser = argparse.ArgumentParser()
  command_parsers = parser.add_subparsers(title='test types',
                                          dest='command')

  for test_type, config in sorted(VALID_COMMANDS.iteritems(),
                                  key=lambda x: x[0]):
    subparser = command_parsers.add_parser(
        test_type, usage='%(prog)s [options]', help=config.help_txt)
    config.add_options_func(subparser)

  args = parser.parse_args()

  try:
    return RunTestsCommand(args, parser)
  except base_error.BaseError as e:
    logging.exception('Error occurred.')
    if e.is_infra_error:
      return constants.INFRA_EXIT_CODE
    else:
      return constants.ERROR_EXIT_CODE
  except:  # pylint: disable=W0702
    logging.exception('Unrecognized error occurred.')
    return constants.ERROR_EXIT_CODE


if __name__ == '__main__':
  sys.exit(main())