# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
import sys
from functools import partial

from mach.decorators import Command, CommandArgument, SubCommand
from mozbuild.base import MachCommandConditions as conditions
13 "linux-xpcshell": "perftest-linux-try-xpcshell",
14 "mac-xpcshell": "perftest-macosx-try-xpcshell",
15 "linux-browsertime": "perftest-linux-try-browsertime",
16 "mac-browsertime": "perftest-macosx-try-browsertime",
17 "win-browsertimee": "perftest-windows-try-browsertime",
21 HERE
= os
.path
.dirname(__file__
)
def get_perftest_parser():
    """Lazily import and return the perftest argument parser class.

    The import is deferred so that simply loading this mach command
    module does not pull in mozperftest.
    """
    from mozperftest import PerftestArgumentParser as parser_class

    return parser_class
def get_perftest_tools_parser(tool):
    """Build a parser factory bound to *tool*.

    Returns a zero-argument callable that lazily imports the tools
    argument parser class, pins its ``tool`` attribute to *tool*, and
    returns the class.
    """

    def tools_parser_func():
        from mozperftest import PerftestToolsArgumentParser as parser_class

        parser_class.tool = tool
        return parser_class

    return tools_parser_func
def get_parser():
    """Return the argument parser attached to the ``perftest`` mach command."""
    # mach stores the parser created via the @Command decorator on the
    # command function itself.
    return run_perftest._mach_command._parser
@Command(
    "perftest",
    category="testing",
    conditions=[partial(conditions.is_buildapp_in, apps=["firefox", "android"])],
    description="Run any flavor of perftest",
    parser=get_perftest_parser,
)
def run_perftest(command_context, **kwargs):
    """Run a perftest locally or push it to try.

    When run locally with no tests given, opens a fuzzy selector so the
    user can pick a test.  Detects the script flavor (forcing it for
    xpcshell scripts), then either builds a try_task_config push or runs
    the tests through mozperftest.runner.
    """
    # original parser that brought us there
    original_parser = get_parser()

    from pathlib import Path

    from mozperftest.script import ParseError, ScriptInfo, ScriptType

    # user selection with fuzzy UI
    from mozperftest.utils import ON_TRY

    if not ON_TRY and kwargs.get("tests", []) == []:
        from moztest.resolve import TestResolver

        from mozperftest.fzf.fzf import select

        resolver = command_context._spawn(TestResolver)
        test_objects = list(resolver.resolve_tests(paths=None, flavor="perftest"))
        selected = select(test_objects)

        def full_path(selection):
            # NOTE(review): assumes each fzf line is four space-separated
            # fields ending with the script name and its location — confirm
            # against mozperftest.fzf output.
            __, script_name, __, location = selection.split(" ")
            return str(
                Path(
                    command_context.topsrcdir.rstrip(os.sep),
                    location.strip(os.sep),
                    script_name,
                )
            )

        kwargs["tests"] = [full_path(s) for s in selected]

        if kwargs["tests"] == []:
            print("\nNo selection. Bye!")
            return

    if len(kwargs["tests"]) > 1:
        print("\nSorry no support yet for multiple local perftest")
        return

    sel = "\n".join(kwargs["tests"])
    print("\nGood job! Best selection.\n%s" % sel)
    # if the script is xpcshell, we can force the flavor here
    # XXX on multi-selection, what happens if we have several flavors?
    try:
        script_info = ScriptInfo(kwargs["tests"][0])
    except ParseError as e:
        # a directory selection is tolerated; anything else is fatal
        if e.exception is IsADirectoryError:
            script_info = None
        else:
            raise
    else:
        if script_info.script_type == ScriptType.xpcshell:
            kwargs["flavor"] = script_info.script_type.name

    # we set the value only if not provided (so "mobile-browser"
    # can be picked by the args parser)
    if "flavor" not in kwargs:
        kwargs["flavor"] = "desktop-browser"

    push_to_try = kwargs.pop("push_to_try", False)
    if push_to_try:
        sys.path.append(str(Path(command_context.topsrcdir, "tools", "tryselect")))

        from tryselect.push import push_to_try

        perftest_parameters = {}
        # NOTE(review): script_info may be None here (directory selection);
        # update_args would then fail — confirm whether that path is reachable
        # with --push-to-try.
        args = script_info.update_args(**original_parser.get_user_args(kwargs))
        platform = args.pop("try_platform", "linux")
        if isinstance(platform, str):
            platform = [platform]

        # keys into _TRY_PLATFORMS: "<platform>-<script type name>"
        platform = [
            "%s-%s" % (plat, script_info.script_type.name) for plat in platform
        ]

        for plat in platform:
            if plat not in _TRY_PLATFORMS:
                # we can extend platform support here: linux, win, macOS
                # by adding more jobs in taskcluster/ci/perftest/kind.yml
                # then picking up the right one here
                raise NotImplementedError(
                    "%r doesn't exist or is not yet supported" % plat
                )

        def relative(path):
            # make topsrcdir-based paths relative for the try payload
            if path.startswith(command_context.topsrcdir):
                return path[len(command_context.topsrcdir) :].lstrip(os.sep)
            return path

        for name, value in args.items():
            # ignore values that are set to default
            if original_parser.get_default(name) == value:
                continue
            if name == "tests":
                value = [relative(path) for path in value]
            perftest_parameters[name] = value

        parameters = {
            "try_task_config": {
                "tasks": [_TRY_PLATFORMS[plat] for plat in platform],
                "perftest-options": perftest_parameters,
            },
            "try_mode": "try_task_config",
        }

        task_config = {"parameters": parameters, "version": 2}
        if args.get("verbose"):
            print("Pushing run to try...")
            print(json.dumps(task_config, indent=4, sort_keys=True))

        push_to_try("perftest", "perftest", try_task_config=task_config)
        return

    from mozperftest.runner import run_tests

    run_tests(command_context, kwargs, original_parser.get_user_args(kwargs))

    print("\nFirefox. Fast For Good.\n")
@Command(
    "perftest-test",
    category="testing",
    description="Run perftest tests",
    virtualenv_name="perftest-test",
)
@CommandArgument(
    "tests", default=None, nargs="*", help="Tests to run. By default will run all"
)
@CommandArgument(
    "-s",
    "--skip-linters",
    action="store_true",
    default=False,
    help="Skip flake8 and black",
)
@CommandArgument(
    "-v", "--verbose", action="store_true", default=False, help="Verbose mode"
)
def run_tests(command_context, **kwargs):
    """Run the mozperftest unit tests inside a controlled environment."""
    from pathlib import Path

    from mozperftest.utils import temporary_env

    # point coverage at our rc file and flag that the suite is under test
    with temporary_env(
        COVERAGE_RCFILE=str(Path(HERE, ".coveragerc")), RUNNING_TESTS="YES"
    ):
        _run_tests(command_context, **kwargs)
def _run_tests(command_context, **kwargs):
    """Lint (unless skipped) and run the mozperftest tests under coverage.

    Raises AssertionError when linting or the test run fails, and
    ValueError when the coverage report check fails.
    """
    from pathlib import Path

    from mozperftest.utils import ON_TRY, checkout_python_script, checkout_script

    venv = command_context.virtualenv_manager
    skip_linters = kwargs.get("skip_linters", False)
    verbose = kwargs.get("verbose", False)

    if not ON_TRY and not skip_linters:
        # NOTE(review): lint command reconstructed from context — confirm flags
        cmd = "./mach lint "
        if verbose:
            cmd += " -v"
        cmd += " " + str(HERE)
        if not checkout_script(cmd, label="linters", display=verbose, verbose=verbose):
            raise AssertionError("Please fix your code.")

    # running pytest with coverage
    # coverage is done in three steps:
    # 1/ coverage erase => erase any previous coverage data
    # 2/ coverage run pytest ... => run the tests and collect info
    # 3/ coverage report => generate the report
    tests_dir = Path(HERE, "tests").resolve()
    tests = kwargs.get("tests", [])
    if tests == []:
        tests = str(tests_dir)
        run_coverage_check = not skip_linters
    else:
        # explicit selection: coverage thresholds don't apply to a subset
        run_coverage_check = False

        def _get_test(test):
            # accept either an existing path or a name relative to tests/
            if Path(test).exists():
                return test
            return str(tests_dir / test)

        tests = " ".join([_get_test(test) for test in tests])

    # on macOS + try we skip the coverage
    # because macOS workers prevent us from installing
    # packages from PyPI
    if sys.platform == "darwin" and ON_TRY:
        run_coverage_check = False

    options = "-xs"
    if kwargs.get("verbose"):
        options += "v"

    # use explicit raises instead of `assert` so the checks survive python -O
    if run_coverage_check:
        if not checkout_python_script(
            venv, "coverage", ["erase"], label="remove old coverage data"
        ):
            raise AssertionError("Failed to erase old coverage data")
    args = ["run", "-m", "pytest", options, "--durations", "10", tests]
    if not checkout_python_script(
        venv, "coverage", args, label="running tests", verbose=verbose
    ):
        raise AssertionError("Test run failed")
    if run_coverage_check and not checkout_python_script(
        venv, "coverage", ["report"], display=True
    ):
        raise ValueError("Coverage is too low!")
@Command(
    "perftest-tools",
    category="testing",
    description="Run perftest tools",
)
def run_tools(command_context, **kwargs):
    """
    Runs various perftest tools such as the side-by-side generator.
    """
    print("Runs various perftest tools such as the side-by-side generator.")
@SubCommand(
    "perftest-tools",
    "side-by-side",
    description="This tool can be used to generate a side-by-side visualization of two videos. "
    "When using this tool, make sure that the `--test-name` is an exact match, i.e. if you are "
    "comparing the task `test-linux64-shippable-qr/opt-browsertime-tp6-firefox-linkedin-e10s` "
    "between two revisions, then use `browsertime-tp6-firefox-linkedin-e10s` as the suite name "
    "and `test-linux64-shippable-qr/opt` as the platform.",
    parser=get_perftest_tools_parser("side-by-side"),
)
def run_side_by_side(command_context, **kwargs):
    """Delegate to the mozperftest side-by-side tool runner."""
    from mozperftest.runner import run_tools

    kwargs["tool"] = "side-by-side"
    run_tools(command_context, kwargs)
@SubCommand(
    "perftest-tools",
    "change-detector",
    description="This tool can be used to determine if there are differences between two "
    "revisions. It can do either direct comparisons, or searching for regressions in between "
    "two revisions (with a maximum or autocomputed depth).",
    parser=get_perftest_tools_parser("change-detector"),
)
def run_change_detector(command_context, **kwargs):
    """Delegate to the mozperftest change-detector tool runner."""
    from mozperftest.runner import run_tools

    kwargs["tool"] = "change-detector"
    run_tools(command_context, kwargs)