# python/mozperftest/mozperftest/tools.py
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import os
import shutil
import tempfile
from pathlib import Path


class PerformanceChangeDetected(Exception):
    """Raised when a performance change is detected.

    This failure happens for both regressions and improvements. There
    is no unique failure for each of them.

    TODO: We eventually need to be able to distinguish between these.
    To do so, we would need to incorporate the "lower_is_better" settings
    into the detection tooling.
    """

    pass


def run_side_by_side(artifacts, kwargs):
    """Run the side-by-side tool.

    The videos, gifs, and JSON files it produces are copied into the
    given artifacts directory, unless an explicit output directory is
    requested through the "output" kwarg.
    """
    from mozperftest_tools.side_by_side import SideBySide

    output_specified = None
    if "output" in kwargs:
        output_specified = kwargs.pop("output")

    if output_specified:
        s = SideBySide(str(output_specified))
        s.run(**kwargs)
        print(f"Results can be found in {output_specified}")
    else:
        tempdir = tempfile.mkdtemp()
        s = SideBySide(str(tempdir))
        s.run(**kwargs)
        try:
            for file in os.listdir(tempdir):
                if (
                    file.endswith(".mp4")
                    or file.endswith(".gif")
                    or file.endswith(".json")
                ):
                    print(f"Copying from {tempdir}/{file} to {artifacts}")
                    shutil.copy(Path(tempdir, file), artifacts)
        finally:
            shutil.rmtree(tempdir)
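

# Illustrative only: after "output" is popped, the remaining kwargs are
# forwarded verbatim to SideBySide.run(), so the keys below are assumptions
# about what that tool accepts rather than its documented API:
#
#     run_side_by_side(
#         "/path/to/artifacts",
#         {"output": "/tmp/sxs", "base_revision": "abc123", "new_revision": "def456"},
#     )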


def _gather_task_names(kwargs):
    task_names = kwargs.get("task_names", [])
    if len(task_names) == 0:
        if kwargs.get("test_name", None) is None:
            raise Exception("No test name, or task names, given!")
        if kwargs.get("platform", None) is None:
            raise Exception("No platform, or task names, given!")
        task_names.append(kwargs["platform"] + "-" + kwargs["test_name"])
    return task_names
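

# For example (illustrative values), when no explicit task names are given,
# a single task name is built from the platform and test name:
#
#     _gather_task_names(
#         {"platform": "test-linux64-shippable/opt", "test_name": "browsertime-tp6"}
#     )
#     # -> ["test-linux64-shippable/opt-browsertime-tp6"]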


def _get_task_splitter(task):
    splitter = "/opt-"
    if splitter not in task:
        # Fall back to the first dash-separated token after the final "/"
        # for tasks that don't use an opt build type.
        splitter = "/" + task.split("/")[-1].split("-")[0] + "-"
    return splitter
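

# Illustrative behaviour (the task names here are made up):
#
#     _get_task_splitter("test-linux64-shippable/opt-browsertime-amazon")
#     # -> "/opt-"
#     _get_task_splitter("test-android-hw-a51/geckoview-speedometer")
#     # -> "/geckoview-"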


def _format_changes_to_str(all_results):
    changes_detected = None
    for task, results in all_results.items():
        for pltype, metrics in results["metrics-with-changes"].items():
            for metric, changes in metrics.items():
                for revision, diffs in changes.items():
                    if changes_detected is None:
                        changes_detected = "REVISION PL_TYPE METRIC %-DIFFERENCE\n"
                    changes_detected += f"{revision} {pltype} {metric} {str(diffs)}\n"
    return changes_detected
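

# The string returned above is a small space-separated table, e.g.
# (illustrative values):
#
#     REVISION PL_TYPE METRIC %-DIFFERENCE
#     abcdef123456 warm fcp [5.2]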


def run_change_detector(artifacts, kwargs):
    """Run the change detector over the given tasks.

    The results are saved to a results.json artifact, and a
    PerformanceChangeDetected exception is raised if any significant
    changes were found.
    """
    from mozperftest_tools.regression_detector import ChangeDetector

    tempdir = tempfile.mkdtemp()
    detector = ChangeDetector(tempdir)

    all_results = {}
    results_path = Path(artifacts, "results.json")
    try:
        for task in _gather_task_names(kwargs):
            splitter = _get_task_splitter(task)

            platform, test_name = task.split(splitter)
            platform += splitter[:-1]

            new_test_name = test_name
            new_platform_name = platform
            if kwargs["new_test_name"] is not None:
                new_test_name = kwargs["new_test_name"]
            if kwargs["new_platform"] is not None:
                new_platform_name = kwargs["new_platform"]

            all_changed_revisions, changed_metric_revisions = detector.detect_changes(
                test_name=test_name,
                new_test_name=new_test_name,
                platform=platform,
                new_platform=new_platform_name,
                base_revision=kwargs["base_revision"],
                new_revision=kwargs["new_revision"],
                base_branch=kwargs["base_branch"],
                new_branch=kwargs["new_branch"],
                # A depth of -1 means auto-computed (everything in between the
                # two given revisions), None means a direct comparison, and
                # anything else uses the new_revision as a start and goes
                # backwards from there.
                depth=kwargs.get("depth", None),
                skip_download=False,
                overwrite=False,
            )

            # The task names are unique, so we don't need to worry about
            # them overwriting each other.
            all_results[task] = {}
            all_results[task]["revisions-with-changes"] = list(all_changed_revisions)
            all_results[task]["metrics-with-changes"] = changed_metric_revisions

        changes_detected = _format_changes_to_str(all_results)
        if changes_detected is not None:
            print(changes_detected)
            raise PerformanceChangeDetected(
                "[ERROR] A significant performance change was detected in your patch! "
                "See the logs above, or the results.json artifact that was produced, "
                "for more information."
            )
    finally:
        shutil.rmtree(tempdir)

        print(f"Saving change detection results to {str(results_path)}")
        with results_path.open("w") as f:
            json.dump(all_results, f, indent=4)
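

# The saved results.json is keyed by task name; its rough shape (illustrative,
# inferred from how all_results is built above) is:
#
#     {
#         "<task-name>": {
#             "revisions-with-changes": ["<revision>", ...],
#             "metrics-with-changes": {
#                 "<pltype>": {"<metric>": {"<revision>": <diffs>}}
#             }
#         }
#     }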


TOOL_RUNNERS = {
    "side-by-side": run_side_by_side,
    "change-detector": run_change_detector,
}