# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import, division, print_function

import datetime
import errno
import json
import os
import posixpath
import re
import requests
import six.moves.urllib_parse as urlparse
import subprocess
import mozpack.path as mozpath
from moztest.resolve import TestResolver, TestManifestLoader
from mozfile import which

from mozbuild.base import MozbuildObject, MachCommandConditions as conditions

REFERER = "https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Test-Info"


class TestInfo(object):
    """
    Support 'mach test-info'.
    """

    def __init__(self, verbose):
        self.verbose = verbose
        here = os.path.abspath(os.path.dirname(__file__))
        self.build_obj = MozbuildObject.from_environment(cwd=here)

    def log_verbose(self, what):
        if self.verbose:
            print(what)


class TestInfoTests(TestInfo):
    """
    Support 'mach test-info tests': Detailed report of specified tests.
    """

    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)

        self._hg = None
        if conditions.is_hg(self.build_obj):
            self._hg = which("hg")
            if not self._hg:
                raise OSError(errno.ENOENT, "Could not find 'hg' on PATH.")

        self._git = None
        if conditions.is_git(self.build_obj):
            self._git = which("git")
            if not self._git:
                raise OSError(errno.ENOENT, "Could not find 'git' on PATH.")

    def find_in_hg_or_git(self, test_name):
        if self._hg:
            cmd = [self._hg, "files", "-I", test_name]
        elif self._git:
            cmd = [self._git, "ls-files", test_name]
        else:
            return None
        try:
            out = subprocess.check_output(cmd, universal_newlines=True).splitlines()
        except subprocess.CalledProcessError:
            out = None
        return out

    def set_test_name(self):
        # Generating a unified report for a specific test is complicated
        # by differences in the test name used in various data sources.
        # Consider:
        # - It is often convenient to request a report based only on
        #   a short file name, rather than the full path;
        # - Bugs may be filed in bugzilla against a simple, short test
        #   name or the full path to the test;
        # This function attempts to find appropriate names for different
        # queries based on the specified test name.
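        # Illustrative example (file names are hypothetical): given the short
        # name "test_foo.html", find_in_hg_or_git("**/test_foo.html*") might
        # resolve it to "dom/base/test/test_foo.html", which is then used for
        # the manifest and Bugzilla queries below.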

        # full_test_name is full path to file in hg (or git)
        self.full_test_name = None
        out = self.find_in_hg_or_git(self.test_name)
        if out and len(out) == 1:
            self.full_test_name = out[0]
        elif out and len(out) > 1:
            print("Ambiguous test name specified. Found:")
            for line in out:
                print(line)
        else:
            out = self.find_in_hg_or_git("**/%s*" % self.test_name)
            if out and len(out) == 1:
                self.full_test_name = out[0]
            elif out and len(out) > 1:
                print("Ambiguous test name. Found:")
                for line in out:
                    print(line)
        if self.full_test_name:
            self.full_test_name = self.full_test_name.replace(os.sep, posixpath.sep)
103 print("Found %s in source control." % self.full_test_name)
104 else:
105 print("Unable to validate test name '%s'!" % self.test_name)
106 self.full_test_name = self.test_name
108 # search for full_test_name in test manifests
109 here = os.path.abspath(os.path.dirname(__file__))
110 resolver = TestResolver.from_environment(
111 cwd=here, loader_cls=TestManifestLoader
113 relpath = self.build_obj._wrap_path_argument(self.full_test_name).relpath()
114 tests = list(resolver.resolve_tests(paths=[relpath]))
115 if len(tests) == 1:
116 relpath = self.build_obj._wrap_path_argument(tests[0]["manifest"]).relpath()
117 print("%s found in manifest %s" % (self.full_test_name, relpath))
118 if tests[0].get("flavor"):
119 print(" flavor: %s" % tests[0]["flavor"])
120 if tests[0].get("skip-if"):
121 print(" skip-if: %s" % tests[0]["skip-if"])
122 if tests[0].get("fail-if"):
123 print(" fail-if: %s" % tests[0]["fail-if"])
124 elif len(tests) == 0:
125 print("%s not found in any test manifest!" % self.full_test_name)
126 else:
127 print("%s found in more than one manifest!" % self.full_test_name)
129 # short_name is full_test_name without path
130 self.short_name = None
131 name_idx = self.full_test_name.rfind("/")
132 if name_idx > 0:
133 self.short_name = self.full_test_name[name_idx + 1 :]
134 if self.short_name and self.short_name == self.test_name:
135 self.short_name = None
137 def get_platform(self, record):
138 if "platform" in record["build"]:
139 platform = record["build"]["platform"]
140 else:
141 platform = "-"
142 platform_words = platform.split("-")
143 types_label = ""
144 # combine run and build types and eliminate duplicates
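        # Illustrative example (values are made up): platform "windows10-64"
        # with run/build types ["e10s", "debug"] yields the label
        # "windows10-64/debug:" -- "e10s" is implied, "chunked" is dropped, and
        # any type already named in the platform string is not repeated.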
        run_types = []
        if "run" in record and "type" in record["run"]:
            run_types = record["run"]["type"]
            run_types = run_types if isinstance(run_types, list) else [run_types]
        build_types = []
        if "build" in record and "type" in record["build"]:
            build_types = record["build"]["type"]
            build_types = (
                build_types if isinstance(build_types, list) else [build_types]
            )
        run_types = list(set(run_types + build_types))
        # '1proc' is used as a treeherder label but does not appear in run types
        if "e10s" not in run_types:
            run_types = run_types + ["1proc"]
        for run_type in run_types:
            # chunked is not interesting
            if run_type == "chunked":
                continue
            # e10s is the default: implied
            if run_type == "e10s":
                continue
            # sometimes a build/run type is already present in the build platform
            if run_type in platform_words:
                continue
            if types_label:
                types_label += "-"
            types_label += run_type
        return "%s/%s:" % (platform, types_label)

    def report_bugs(self):
        # Report open bugs matching test name
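        # The quicksearch string is a comma-separated list of name variants,
        # e.g. (hypothetical) "dom/base/test/test_foo.html,test_foo.html".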
        search = self.full_test_name
        if self.test_name:
            search = "%s,%s" % (search, self.test_name)
        if self.short_name:
            search = "%s,%s" % (search, self.short_name)
        payload = {"quicksearch": search, "include_fields": "id,summary"}
        response = requests.get("https://bugzilla.mozilla.org/rest/bug", payload)
        response.raise_for_status()
        json_response = response.json()
        print("\nBugzilla quick search for '%s':" % search)
        if "bugs" in json_response:
            for bug in json_response["bugs"]:
                print("Bug %s: %s" % (bug["id"], bug["summary"]))
        else:
            print("No bugs found.")

    def report(
        self,
        test_names,
        start,
        end,
        show_info,
        show_bugs,
    ):
        self.start = start
        self.end = end
        self.show_info = show_info

        if not self.show_info and not show_bugs:
            # by default, show everything
            self.show_info = True
            show_bugs = True

        for test_name in test_names:
            print("===== %s =====" % test_name)
            self.test_name = test_name
            if len(self.test_name) < 6:
                print("'%s' is too short for a test name!" % self.test_name)
                continue
            self.set_test_name()
            if show_bugs:
                self.report_bugs()


class TestInfoReport(TestInfo):
    """
    Support 'mach test-info report': Report of test runs summarized by
    manifest and component.
    """

    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)
        self.threads = []

    def update_report(self, by_component, result, path_mod):
        def update_item(item, label, value):
            # It is important to include any existing item value in case ActiveData
            # returns multiple records for the same test; that can happen if the report
            # sometimes maps more than one ActiveData record to the same path.
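            # For example, if item is {"total runs": 3}, then
            # update_item(item, "total runs", 2) leaves item as {"total runs": 5}.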
            new_value = item.get(label, 0) + value
            if type(new_value) == int:
                item[label] = new_value
            else:
                item[label] = float(round(new_value, 2))  # pylint: disable=W1633

        if "test" in result and "tests" in by_component:
            test = result["test"]
            if path_mod:
                test = path_mod(test)
            for bc in by_component["tests"]:
                for item in by_component["tests"][bc]:
                    if test == item["test"]:
                        # pylint: disable=W1633
                        seconds = float(round(result.get("duration", 0), 2))
                        update_item(item, "total run time, seconds", seconds)
                        update_item(item, "total runs", result.get("count", 0))
                        update_item(item, "skipped runs", result.get("skips", 0))
                        update_item(item, "failed runs", result.get("failures", 0))
                        return True
        return False

    def path_mod_reftest(self, path):
        # "<path1> == <path2>" -> "<path1>"
        path = path.split(" ")[0]
        # "<path>?<params>" -> "<path>"
        path = path.split("?")[0]
        # "<path>#<fragment>" -> "<path>"
        path = path.split("#")[0]
        return path

    def path_mod_jsreftest(self, path):
        # "<path>;assert" -> "<path>"
        path = path.split(";")[0]
        return path

    def path_mod_marionette(self, path):
        # "<path> <test-name>" -> "<path>"
        path = path.split(" ")[0]
        # "part1\part2" -> "part1/part2"
        path = path.replace("\\", os.path.sep)
        return path

    def path_mod_wpt(self, path):
        if path[0] == os.path.sep:
            # "/<path>" -> "<path>"
            path = path[1:]
        # "<path>" -> "testing/web-platform/tests/<path>"
        path = os.path.join("testing", "web-platform", "tests", path)
        # "<path>?<params>" -> "<path>"
        path = path.split("?")[0]
        return path

    def path_mod_jittest(self, path):
        # "part1\part2" -> "part1/part2"
        path = path.replace("\\", os.path.sep)
        # "<path>" -> "js/src/jit-test/tests/<path>"
        return os.path.join("js", "src", "jit-test", "tests", path)

    def path_mod_xpcshell(self, path):
        # "<manifest>.ini:<path>" -> "<path>"
        path = path.split(".ini:")[-1]
        return path

    def description(
        self,
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
    ):
        # provide a natural language description of the report options
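        # Illustrative result (date is an example): with only the default
        # options this returns something like "Test summary report for test
        # manifests, in all components as of 2021-01-01 12:00."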
        what = []
        if show_manifests:
            what.append("test manifests")
        if show_tests:
            what.append("tests")
        if show_annotations:
            what.append("test manifest annotations")
        if show_summary and len(what) == 0:
            what.append("summary of tests only")
        if len(what) > 1:
            what[-1] = "and " + what[-1]
        what = ", ".join(what)
        d = "Test summary report for " + what
        if components:
            d += ", in specified components (%s)" % components
        else:
            d += ", in all components"
        if flavor:
            d += ", in specified flavor (%s)" % flavor
        if subsuite:
            d += ", in specified subsuite (%s)" % subsuite
        if paths:
            d += ", on specified paths (%s)" % paths
        if filter_values:
            d += ", containing '%s'" % filter_values
            if filter_keys:
                d += " in manifest keys '%s'" % filter_keys
            else:
                d += " in any part of manifest entry"
        d += " as of %s." % datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
        return d

    def report(
        self,
        components,
        flavor,
        subsuite,
        paths,
        show_manifests,
        show_tests,
        show_summary,
        show_annotations,
        filter_values,
        filter_keys,
        show_components,
        output_file,
    ):
        def matches_filters(test):
            """
            Return True if all of the requested filter_values are found in this test;
            if filter_keys are specified, restrict search to those test keys.
            """
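            # For example (values hypothetical): filter_values=["debug"] with
            # filter_keys=["skip-if"] matches only tests whose skip-if
            # annotation mentions "debug".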
            for value in filter_values:
                value_found = False
                for key in test:
                    if not filter_keys or key in filter_keys:
                        if re.search(value, test[key]):
                            value_found = True
                            break
                if not value_found:
                    return False
            return True

        start_time = datetime.datetime.now()

        # Ensure useful report by default
        if (
            not show_manifests
            and not show_tests
            and not show_summary
            and not show_annotations
        ):
            show_manifests = True
            show_summary = True
        by_component = {}
        if components:
            components = components.split(",")
        if filter_keys:
            filter_keys = filter_keys.split(",")
        if filter_values:
            filter_values = filter_values.split(",")
        else:
            filter_values = []
        display_keys = (filter_keys or []) + ["skip-if", "fail-if", "fails-if"]
        display_keys = set(display_keys)

        print("Finding tests...")
        here = os.path.abspath(os.path.dirname(__file__))
        resolver = TestResolver.from_environment(
            cwd=here, loader_cls=TestManifestLoader
        )
        tests = list(
            resolver.resolve_tests(paths=paths, flavor=flavor, subsuite=subsuite)
        )

        manifest_paths = set()
        for t in tests:
            if "manifest" in t and t["manifest"] is not None:
                manifest_paths.add(t["manifest"])
        manifest_count = len(manifest_paths)
        print(
            "Resolver found {} tests, {} manifests".format(len(tests), manifest_count)
        )

        if show_manifests:
            topsrcdir = self.build_obj.topsrcdir
            by_component["manifests"] = {}
            manifest_paths = list(manifest_paths)
            manifest_paths.sort()
            relpaths = []
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode="empty")
            files_info = reader.files_info(relpaths)
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                manifest_info = None
                if relpath in files_info:
                    bug_component = files_info[relpath].get("BUG_COMPONENT")
                    if bug_component:
                        key = "{}::{}".format(
                            bug_component.product, bug_component.component
                        )
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        manifest_info = {"manifest": relpath, "tests": 0, "skipped": 0}
                        rkey = key if show_components else "all"
                        if rkey in by_component["manifests"]:
                            by_component["manifests"][rkey].append(manifest_info)
                        else:
                            by_component["manifests"][rkey] = [manifest_info]
                if manifest_info:
                    for t in tests:
                        if t["manifest"] == manifest_path:
                            manifest_info["tests"] += 1
                            if t.get("skip-if"):
                                manifest_info["skipped"] += 1
            for key in by_component["manifests"]:
                by_component["manifests"][key].sort(key=lambda k: k["manifest"])

        if show_tests:
            by_component["tests"] = {}

        if show_tests or show_summary or show_annotations:
            test_count = 0
            failed_count = 0
            skipped_count = 0
            annotation_count = 0
            condition_count = 0
            component_set = set()
            relpaths = []
            conditions = {}
            known_unconditional_annotations = ["skip", "fail", "asserts", "random"]
            known_conditional_annotations = [
                "skip-if",
                "fail-if",
                "run-if",
                "fails-if",
                "fuzzy-if",
                "random-if",
                "asserts-if",
            ]
            for t in tests:
                relpath = t.get("srcdir_relpath")
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode="empty")
            files_info = reader.files_info(relpaths)
            for t in tests:
                if not matches_filters(t):
                    continue
                if "referenced-test" in t:
                    # Avoid double-counting reftests: disregard reference file entries
                    continue
                if show_annotations:
                    for key in t:
                        if key in known_unconditional_annotations:
                            annotation_count += 1
                        if key in known_conditional_annotations:
                            annotation_count += 1
                            # Here 'key' is a manifest annotation type like 'skip-if' and t[key]
                            # is the associated condition. For example, the manifestparser
                            # manifest annotation, "skip-if = os == 'win'", is expected to be
                            # encoded as t['skip-if'] = "os == 'win'".
                            # To allow for reftest manifests, t[key] may have multiple entries
                            # separated by ';', each corresponding to a condition for that test
                            # and annotation type. For example,
                            # "skip-if(Android&&webrender) skip-if(OSX)", would be
                            # encoded as t['skip-if'] = "Android&&webrender;OSX".
                            annotation_conditions = t[key].split(";")
                            for condition in annotation_conditions:
                                condition_count += 1
                                # Trim reftest fuzzy-if ranges: everything after the first comma
                                # eg. "Android,0-2,1-3" -> "Android"
                                condition = condition.split(",")[0]
                                if condition not in conditions:
                                    conditions[condition] = 0
                                conditions[condition] += 1
                test_count += 1
                relpath = t.get("srcdir_relpath")
                if relpath in files_info:
                    bug_component = files_info[relpath].get("BUG_COMPONENT")
                    if bug_component:
                        key = "{}::{}".format(
                            bug_component.product, bug_component.component
                        )
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        component_set.add(key)
                        test_info = {"test": relpath}
                        for test_key in display_keys:
                            value = t.get(test_key)
                            if value:
                                test_info[test_key] = value
                        if t.get("fail-if"):
                            failed_count += 1
                        if t.get("fails-if"):
                            failed_count += 1
                        if t.get("skip-if"):
                            skipped_count += 1
                        if show_tests:
                            rkey = key if show_components else "all"
                            if rkey in by_component["tests"]:
                                # Avoid duplicates: Some test paths have multiple TestResolver
                                # entries, as when a test is included by multiple manifests.
                                found = False
                                for ctest in by_component["tests"][rkey]:
                                    if ctest["test"] == test_info["test"]:
                                        found = True
                                        break
                                if not found:
                                    by_component["tests"][rkey].append(test_info)
                            else:
                                by_component["tests"][rkey] = [test_info]
            if show_tests:
                for key in by_component["tests"]:
                    by_component["tests"][key].sort(key=lambda k: k["test"])

        by_component["description"] = self.description(
            components,
            flavor,
            subsuite,
            paths,
            show_manifests,
            show_tests,
            show_summary,
            show_annotations,
            filter_values,
            filter_keys,
        )

        if show_summary:
            by_component["summary"] = {}
            by_component["summary"]["components"] = len(component_set)
            by_component["summary"]["manifests"] = manifest_count
            by_component["summary"]["tests"] = test_count
            by_component["summary"]["failed tests"] = failed_count
            by_component["summary"]["skipped tests"] = skipped_count

        if show_annotations:
            by_component["annotations"] = {}
            by_component["annotations"]["total annotations"] = annotation_count
            by_component["annotations"]["total conditions"] = condition_count
            by_component["annotations"]["unique conditions"] = len(conditions)
            by_component["annotations"]["conditions"] = conditions

        self.write_report(by_component, output_file)

        end_time = datetime.datetime.now()
        self.log_verbose(
            "%d seconds total to generate report"
            % (end_time - start_time).total_seconds()
        )

    def write_report(self, by_component, output_file):
        json_report = json.dumps(by_component, indent=2, sort_keys=True)
        if output_file:
            output_file = os.path.abspath(output_file)
            output_dir = os.path.dirname(output_file)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)

            with open(output_file, "w") as f:
                f.write(json_report)
        else:
            print(json_report)

    def report_diff(self, before, after, output_file):
        """
        Support for 'mach test-info report-diff'.
        """

        def get_file(path_or_url):
            if urlparse.urlparse(path_or_url).scheme:
                response = requests.get(path_or_url)
                response.raise_for_status()
                return json.loads(response.text)
            with open(path_or_url) as f:
                return json.load(f)

        report1 = get_file(before)
        report2 = get_file(after)

        by_component = {"tests": {}, "summary": {}}
        self.diff_summaries(by_component, report1["summary"], report2["summary"])
        self.diff_all_components(by_component, report1["tests"], report2["tests"])
        self.write_report(by_component, output_file)

    def diff_summaries(self, by_component, summary1, summary2):
        """
        Update by_component with comparison of summaries.
        """
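        # For example, if summary1 has "tests": 100 and summary2 has
        # "tests": 110, by_component["summary"]["tests delta"] is set to 10.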
        all_keys = set(summary1.keys()) | set(summary2.keys())
        for key in all_keys:
            delta = summary2.get(key, 0) - summary1.get(key, 0)
            by_component["summary"]["%s delta" % key] = delta

    def diff_all_components(self, by_component, tests1, tests2):
        """
        Update by_component with any added/deleted tests, for all components.
        """
        self.added_count = 0
        self.deleted_count = 0
        for component in tests1:
            component1 = tests1[component]
            component2 = [] if component not in tests2 else tests2[component]
            self.diff_component(by_component, component, component1, component2)
        for component in tests2:
            if component not in tests1:
                component2 = tests2[component]
                self.diff_component(by_component, component, [], component2)
        by_component["summary"]["added tests"] = self.added_count
        by_component["summary"]["deleted tests"] = self.deleted_count

    def diff_component(self, by_component, component, component1, component2):
        """
        Update by_component[component] with any added/deleted tests for the
        named component.
        "added": tests found in component2 but missing from component1.
        "deleted": tests found in component1 but missing from component2.
        """
        tests1 = set([t["test"] for t in component1])
        tests2 = set([t["test"] for t in component2])
        deleted = tests1 - tests2
        added = tests2 - tests1
        if deleted or added:
            by_component["tests"][component] = {}
            if deleted:
                by_component["tests"][component]["deleted"] = sorted(list(deleted))
            if added:
                by_component["tests"][component]["added"] = sorted(list(added))
        self.added_count += len(added)
        self.deleted_count += len(deleted)
        common = len(tests1.intersection(tests2))
        self.log_verbose(
            "%s: %d deleted, %d added, %d common"
            % (component, len(deleted), len(added), common)
        )