# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import datetime
import errno
import json
import os
import posixpath
import re
import subprocess
from collections import defaultdict

import requests
import six.moves.urllib_parse as urlparse
import taskcluster

import mozpack.path as mozpath
from mozbuild.base import MachCommandConditions as conditions
from mozbuild.base import MozbuildObject
from mozfile import which
from moztest.resolve import TestManifestLoader, TestResolver
from redo import retriable

REFERER = "https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Test-Info"
class TestInfo(object):
    """
    Support 'mach test-info'.
    """

    def __init__(self, verbose):
        self.verbose = verbose
        here = os.path.abspath(os.path.dirname(__file__))
        self.build_obj = MozbuildObject.from_environment(cwd=here)

    def log_verbose(self, what):
        if self.verbose:
            print(what)
class TestInfoTests(TestInfo):
    """
    Support 'mach test-info tests': Detailed report of specified tests.
    """

    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)

        self._hg = None
        if conditions.is_hg(self.build_obj):
            self._hg = which("hg")
            if not self._hg:
                raise OSError(errno.ENOENT, "Could not find 'hg' on PATH.")

        self._git = None
        if conditions.is_git(self.build_obj):
            self._git = which("git")
            if not self._git:
                raise OSError(errno.ENOENT, "Could not find 'git' on PATH.")
    def find_in_hg_or_git(self, test_name):
        if self._hg:
            cmd = [self._hg, "files", "-I", test_name]
        elif self._git:
            cmd = [self._git, "ls-files", test_name]
        else:
            return None
        try:
            out = subprocess.check_output(cmd, universal_newlines=True).splitlines()
        except subprocess.CalledProcessError:
            out = None
        return out
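
    # For example (illustrative, not from the original source): in a Mercurial
    # checkout, find_in_hg_or_git("browser_aboutHome.js") runs
    #   hg files -I browser_aboutHome.js
    # and returns the matching repository paths as a list of strings, or None
    # if the command fails.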
    def set_test_name(self):
        # Generating a unified report for a specific test is complicated
        # by differences in the test name used in various data sources.
        # - It is often convenient to request a report based only on
        #   a short file name, rather than the full path;
        # - Bugs may be filed in bugzilla against a simple, short test
        #   name or the full path to the test;
        # This function attempts to find appropriate names for different
        # queries based on the specified test name.

        # full_test_name is full path to file in hg (or git)
        self.full_test_name = None
        out = self.find_in_hg_or_git(self.test_name)
        if out and len(out) == 1:
            self.full_test_name = out[0]
        elif out and len(out) > 1:
            print("Ambiguous test name specified. Found:")
        else:
            out = self.find_in_hg_or_git("**/%s*" % self.test_name)
            if out and len(out) == 1:
                self.full_test_name = out[0]
            elif out and len(out) > 1:
                print("Ambiguous test name. Found:")
        if self.full_test_name:
            self.full_test_name = self.full_test_name.replace(os.sep, posixpath.sep)
            print("Found %s in source control." % self.full_test_name)
        else:
            print("Unable to validate test name '%s'!" % self.test_name)
            self.full_test_name = self.test_name
        # search for full_test_name in test manifests
        here = os.path.abspath(os.path.dirname(__file__))
        resolver = TestResolver.from_environment(
            cwd=here, loader_cls=TestManifestLoader
        )
        relpath = self.build_obj._wrap_path_argument(self.full_test_name).relpath()
        tests = list(resolver.resolve_tests(paths=[relpath]))
        if len(tests) == 1:
            relpath = self.build_obj._wrap_path_argument(tests[0]["manifest"]).relpath()
            print("%s found in manifest %s" % (self.full_test_name, relpath))
            if tests[0].get("flavor"):
                print(" flavor: %s" % tests[0]["flavor"])
            if tests[0].get("skip-if"):
                print(" skip-if: %s" % tests[0]["skip-if"])
            if tests[0].get("fail-if"):
                print(" fail-if: %s" % tests[0]["fail-if"])
        elif len(tests) == 0:
            print("%s not found in any test manifest!" % self.full_test_name)
        else:
            print("%s found in more than one manifest!" % self.full_test_name)
        # short_name is full_test_name without path
        self.short_name = None
        name_idx = self.full_test_name.rfind("/")
        if name_idx > 0:
            self.short_name = self.full_test_name[name_idx + 1 :]
        if self.short_name and self.short_name == self.test_name:
            self.short_name = None
    def get_platform(self, record):
        if "platform" in record["build"]:
            platform = record["build"]["platform"]
        platform_words = platform.split("-")
        types_label = ""
        # combine run and build types and eliminate duplicates
        run_types = []
        if "run" in record and "type" in record["run"]:
            run_types = record["run"]["type"]
            run_types = run_types if isinstance(run_types, list) else [run_types]
        build_types = []
        if "build" in record and "type" in record["build"]:
            build_types = record["build"]["type"]
            build_types = (
                build_types if isinstance(build_types, list) else [build_types]
            )
        run_types = list(set(run_types + build_types))
        # '1proc' is used as a treeherder label but does not appear in run types
        if "e10s" not in run_types:
            run_types = run_types + ["1proc"]
        for run_type in run_types:
            # chunked is not interesting
            if run_type == "chunked":
                continue
            # e10s is the default: implied
            if run_type == "e10s":
                continue
            # sometimes a build/run type is already present in the build platform
            if run_type in platform_words:
                continue
            if types_label:
                types_label += "-"
            types_label += run_type
        return "%s/%s:" % (platform, types_label)
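
    # Illustrative example (assumed record shape, not taken from the original
    # source): for a record like
    #   {"build": {"platform": "windows10-64", "type": ["opt"]},
    #    "run": {"type": ["e10s"]}}
    # get_platform() returns "windows10-64/opt:"; "e10s" is treated as the
    # implied default and omitted from the label.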
    def report_bugs(self):
        # Report open bugs matching test name
        search = self.full_test_name
        if self.test_name:
            search = "%s,%s" % (search, self.test_name)
        if self.short_name:
            search = "%s,%s" % (search, self.short_name)
        payload = {"quicksearch": search, "include_fields": "id,summary"}
        response = requests.get("https://bugzilla.mozilla.org/rest/bug", payload)
        response.raise_for_status()
        json_response = response.json()
        print("\nBugzilla quick search for '%s':" % search)
        if "bugs" in json_response:
            for bug in json_response["bugs"]:
                print("Bug %s: %s" % (bug["id"], bug["summary"]))
        else:
            print("No bugs found.")
        self.show_info = show_info

        if not self.show_info and not show_bugs:
            # by default, show everything
            self.show_info = True

        for test_name in test_names:
            print("===== %s =====" % test_name)
            self.test_name = test_name
            if len(self.test_name) < 6:
                print("'%s' is too short for a test name!" % self.test_name)
                continue
class TestInfoReport(TestInfo):
    """
    Support 'mach test-info report': Report of test runs summarized by
    manifest and component.
    """

    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)
    @retriable(attempts=3, sleeptime=5, sleepscale=2)
    def get_url(self, target_url):
        # if we fail to get valid json (i.e. end point has malformed data), return {}
        retVal = {}
        try:
            r = requests.get(target_url, headers={"User-agent": "mach-test-info/1.0"})
            retVal = r.json()
        except json.decoder.JSONDecodeError:
            self.log_verbose("Error retrieving data from %s" % target_url)
        return retVal
    def update_report(self, by_component, result, path_mod):
        def update_item(item, label, value):
            # It is important to include any existing item value in case ActiveData
            # returns multiple records for the same test; that can happen if the report
            # sometimes maps more than one ActiveData record to the same path.
            new_value = item.get(label, 0) + value
            if type(new_value) == int:
                item[label] = new_value
            else:
                item[label] = float(round(new_value, 2))  # pylint: disable=W1633

        if "test" in result and "tests" in by_component:
            test = result["test"]
            if path_mod:
                test = path_mod(test)
            for bc in by_component["tests"]:
                for item in by_component["tests"][bc]:
                    if test == item["test"]:
                        # pylint: disable=W1633
                        seconds = float(round(result.get("duration", 0), 2))
                        update_item(item, "total run time, seconds", seconds)
                        update_item(item, "total runs", result.get("count", 0))
                        update_item(item, "skipped runs", result.get("skips", 0))
                        update_item(item, "failed runs", result.get("failures", 0))
    def path_mod_reftest(self, path):
        # "<path1> == <path2>" -> "<path1>"
        path = path.split(" ")[0]
        # "<path>?<params>" -> "<path>"
        path = path.split("?")[0]
        # "<path>#<fragment>" -> "<path>"
        path = path.split("#")[0]
        return path
    def path_mod_jsreftest(self, path):
        # "<path>;assert" -> "<path>"
        path = path.split(";")[0]
        return path
    def path_mod_marionette(self, path):
        # "<path> <test-name>" -> "<path>"
        path = path.split(" ")[0]
        # "part1\part2" -> "part1/part2"
        path = path.replace("\\", os.path.sep)
        return path
    def path_mod_wpt(self, path):
        if path[0] == os.path.sep:
            # "/<path>" -> "<path>"
            path = path[1:]
        # "<path>" -> "testing/web-platform/tests/<path>"
        path = os.path.join("testing", "web-platform", "tests", path)
        # "<path>?<params>" -> "<path>"
        path = path.split("?")[0]
        return path
    def path_mod_jittest(self, path):
        # "part1\part2" -> "part1/part2"
        path = path.replace("\\", os.path.sep)
        # "<path>" -> "js/src/jit-test/tests/<path>"
        return os.path.join("js", "src", "jit-test", "tests", path)
    def path_mod_xpcshell(self, path):
        # <manifest>.{ini|toml}:<path> -> "<path>"
        path = path.split(":")[-1]
        return path
        # provide a natural language description of the report options
        what = []
        if show_manifests:
            what.append("test manifests")
        if show_annotations:
            what.append("test manifest annotations")
        if show_summary and len(what) == 0:
            what.append("summary of tests only")
        if len(what) > 1:
            what[-1] = "and " + what[-1]
        what = ", ".join(what)
        d = "Test summary report for " + what
        if components:
            d += ", in specified components (%s)" % components
        else:
            d += ", in all components"
        if flavor:
            d += ", in specified flavor (%s)" % flavor
        if subsuite:
            d += ", in specified subsuite (%s)" % subsuite
        if paths:
            d += ", on specified paths (%s)" % paths
        if filter_values:
            d += ", containing '%s'" % filter_values
            if filter_keys:
                d += " in manifest keys '%s'" % filter_keys
            else:
                d += " in any part of manifest entry"
        d += ", including historical run-time data for the last "
        start = datetime.datetime.strptime(start_date, "%Y-%m-%d")
        end = datetime.datetime.strptime(end_date, "%Y-%m-%d")
        d += "%s days on trunk (autoland/m-c)" % ((end - start).days)
        d += " as of %s." % end_date
        return d
    # TODO: this is hacked for now and very limited
    def parse_test(self, summary):
        if summary.endswith("single tracking bug"):
            name_part = summary.split("|")[0]  # remove 'single tracking bug'
            return name_part.split()[-1]  # get just the test name, not extra words
        return None
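
    # For example (illustrative): parse_test(
    #     "Intermittent dom/media/test/test_foo.html | single tracking bug")
    # returns "dom/media/test/test_foo.html"; summaries in any other format
    # yield None here.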
    # MAX_DAYS is the module-level default look-back window (in days).
    def get_runcount_data(self, start, end):
        # TODO: use start/end properly
        runcounts = self.get_runcounts(days=MAX_DAYS)
        runcounts = self.squash_runcounts(runcounts, days=MAX_DAYS)
        return runcounts
    def get_testinfoall_index_url(self):
        index = taskcluster.Index(
            {
                "rootUrl": "https://firefox-ci-tc.services.mozilla.com",
            }
        )
        route = "gecko.v2.mozilla-central.latest.source.test-info-all"
        queue = taskcluster.Queue(
            {
                "rootUrl": "https://firefox-ci-tc.services.mozilla.com",
            }
        )

        task_id = index.findTask(route)["taskId"]
        artifacts = queue.listLatestArtifacts(task_id)["artifacts"]

        url = ""
        for artifact in artifacts:
            if artifact["name"].endswith("test-run-info.json"):
                url = queue.buildUrl("getLatestArtifact", task_id, artifact["name"])
                break
        return url
    def get_runcounts(self, days=MAX_DAYS):
        # get historical data from test-info job artifact; if missing get fresh
        url = self.get_testinfoall_index_url()
        print("INFO: requesting runcounts url: %s" % url)
        testrundata = self.get_url(url)

        # fill in any holes we have
        endday = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(
            days=1
        )
        startday = endday - datetime.timedelta(days=days)
        urls_to_fetch = []
        # build list of dates with missing data
        while startday < endday:
            nextday = startday + datetime.timedelta(days=1)
            if (
                str(nextday) not in testrundata.keys()
                or testrundata[str(nextday)] == {}
            ):
                url = "https://treeherder.mozilla.org/api/groupsummary/"
                url += "?startdate=%s&enddate=%s" % (
                    startday.date(),
                    nextday.date(),
                )
                urls_to_fetch.append([str(nextday.date()), url])
                testrundata[str(nextday.date())] = {}
            startday = nextday

        # limit missing data collection to the 5 most recent days to reduce overall runtime
        for date, url in urls_to_fetch[-5:]:
            try:
                testrundata[date] = self.get_url(url)
            except requests.exceptions.HTTPError:
                # We want to see other errors, but can accept HTTPError failures
                print(f"Unable to retrieve results for url: {url}")

        return testrundata
    def squash_runcounts(self, runcounts, days=MAX_DAYS):
        # squash all testrundata together into 1 big happy family for the last X days
        endday = datetime.datetime.now(datetime.timezone.utc) - datetime.timedelta(
            days=1
        )
        oldest = endday - datetime.timedelta(days=days)

        testgroup_runinfo = defaultdict(lambda: defaultdict(int))

        retVal = {}
        for datekey in runcounts.keys():
            # strip out older days
            if datetime.date.fromisoformat(datekey) < oldest.date():
                continue

            jtn = runcounts[datekey].get("job_type_names", {})
            if not jtn:
                print("Warning: Missing job type names from date: %s" % datekey)
                continue

            for m in runcounts[datekey]["manifests"]:
                man_name = list(m.keys())[0]

                for job_type_id, result, classification, count in m[man_name]:
                    # format: job_type_name, result, classification, count
                    # find matching jtn, result, classification and increment 'count'
                    job_name = jtn[job_type_id]
                    key = (job_name, result, classification)
                    testgroup_runinfo[man_name][key] += count

        for m in testgroup_runinfo:
            retVal[m] = [
                list(x) + [testgroup_runinfo[m][x]] for x in testgroup_runinfo[m]
            ]
        return retVal
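
    # Illustrative output shape (assumed from the code above): a dict keyed by
    # manifest name, where each value is a list of
    #   [job_type_name, result, classification, count]
    # entries summed over the retained days, e.g.
    #   {"dom/media/test/mochitest.toml":
    #       [["test-linux1804-64-qr/opt-mochitest-media", "passed", "not classified", 42]]}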
    def get_intermittent_failure_data(self, start, end):
        retVal = {}

        # i.e. https://th.m.o/api/failures/?startday=2022-06-22&endday=2022-06-29&tree=all
        url = (
            "https://treeherder.mozilla.org/api/failures/?startday=%s&endday=%s&tree=trunk"
            % (start, end)
        )
        if_data = self.get_url(url)
        buglist = [x["bug_id"] for x in if_data]

        # get bug data for summary, 800 bugs at a time
        # i.e. https://b.m.o/rest/bug?include_fields=id,product,component,summary&id=1,2,3...
        max_bugs = 800
        bug_data = []
        fields = ["id", "product", "component", "summary"]
        for bug_index in range(0, len(buglist), max_bugs):
            bugs = [str(x) for x in buglist[bug_index : bug_index + max_bugs]]
            if not bugs:
                print(f"warning: found no bugs in range {bug_index}, +{max_bugs}")
                continue

            url = "https://bugzilla.mozilla.org/rest/bug?include_fields=%s&id=%s" % (
                ",".join(fields),
                ",".join(bugs),
            )
            data = self.get_url(url)
            if data and "bugs" in data.keys():
                bug_data.extend(data["bugs"])

        # for each summary, parse filename, store component
        # IF we find >1 bug with same testname, for now summarize as one
        for bug in bug_data:
            test_name = self.parse_test(bug["summary"])
            if not test_name:
                continue

            c = int([x["bug_count"] for x in if_data if x["bug_id"] == bug["id"]][0])
            if test_name not in retVal.keys():
                retVal[test_name] = {
                    "count": c,
                    "product": bug["product"],
                    "component": bug["component"],
                }
            else:
                retVal[test_name]["count"] += c

            if bug["product"] != retVal[test_name]["product"]:
                print(
                    "ERROR | %s | mismatched bugzilla product, bugzilla (%s) != repo (%s)"
                    % (bug["id"], bug["product"], retVal[test_name]["product"])
                )
            if bug["component"] != retVal[test_name]["component"]:
                print(
                    "ERROR | %s | mismatched bugzilla component, bugzilla (%s) != repo (%s)"
                    % (bug["id"], bug["component"], retVal[test_name]["component"])
                )
        return retVal
        def matches_filters(test):
            """
            Return True if all of the requested filter_values are found in this test;
            if filter_keys are specified, restrict search to those test keys.
            """
            for value in filter_values:
                value_found = False
                for key in test:
                    if not filter_keys or key in filter_keys:
                        if re.search(value, test[key]):
                            value_found = True
                            break
                if not value_found:
                    return False
            return True
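
        # For example (illustrative): with filter_values=["win"] and
        # filter_keys=["skip-if"], only tests whose skip-if annotation mentions
        # "win" match; with no filter_keys, any manifest key may match.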
        start_time = datetime.datetime.now()

        # Ensure useful report by default
        if (
            not show_manifests
            and not show_tests
            and not show_summary
            and not show_annotations
        ):
            show_manifests = True

        if components:
            components = components.split(",")
        if filter_keys:
            filter_keys = filter_keys.split(",")
        if filter_values:
            filter_values = filter_values.split(",")
        else:
            filter_values = []

        display_keys = (filter_keys or []) + ["skip-if", "fail-if", "fails-if"]
        display_keys = set(display_keys)
        ifd = self.get_intermittent_failure_data(start, end)
        runcount = {}
        if show_testruns and os.environ.get("GECKO_HEAD_REPOSITORY", "") in [
            "https://hg.mozilla.org/mozilla-central",
            "https://hg.mozilla.org/try",
        ]:
            runcount = self.get_runcount_data(start, end)

        print("Finding tests...")
        here = os.path.abspath(os.path.dirname(__file__))
        resolver = TestResolver.from_environment(
            cwd=here, loader_cls=TestManifestLoader
        )
        tests = list(
            resolver.resolve_tests(paths=paths, flavor=flavor, subsuite=subsuite)
        )

        manifest_paths = set()
        for t in tests:
            if t.get("manifest", None):
                manifest_path = t["manifest"]
                if t.get("ancestor_manifest", None):
                    manifest_path = "%s:%s" % (t["ancestor_manifest"], t["manifest"])
                manifest_paths.add(manifest_path)
        manifest_count = len(manifest_paths)
        print(
            "Resolver found {} tests, {} manifests".format(len(tests), manifest_count)
        )
        by_component = {}
        if show_manifests:
            topsrcdir = self.build_obj.topsrcdir
            by_component["manifests"] = {}
            manifest_paths = list(manifest_paths)
            manifest_paths.sort()
            relpaths = []
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode="empty")
            files_info = reader.files_info(relpaths)
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                if relpath in files_info:
                    bug_component = files_info[relpath].get("BUG_COMPONENT")
                    if bug_component:
                        key = "{}::{}".format(
                            bug_component.product, bug_component.component
                        )
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        manifest_info = {"manifest": relpath, "tests": 0, "skipped": 0}
                        rkey = key if show_components else "all"
                        if rkey in by_component["manifests"]:
                            by_component["manifests"][rkey].append(manifest_info)
                        else:
                            by_component["manifests"][rkey] = [manifest_info]
                        for t in tests:
                            if t["manifest"] == manifest_path:
                                manifest_info["tests"] += 1
                                if t.get("skip-if"):
                                    manifest_info["skipped"] += 1
            for key in by_component["manifests"]:
                by_component["manifests"][key].sort(key=lambda k: k["manifest"])
        by_component["tests"] = {}

        if show_tests or show_summary or show_annotations:
            test_count = 0
            failed_count = 0
            skipped_count = 0
            annotation_count = 0
            condition_count = 0
            component_set = set()
            conditions = {}
            known_unconditional_annotations = ["skip", "fail", "asserts", "random"]
            known_conditional_annotations = [
                "skip-if",
                "fail-if",
                "run-if",
                "fails-if",
                "fuzzy-if",
                "random-if",
                "asserts-if",
            ]
            relpaths = []
            for t in tests:
                relpath = t.get("srcdir_relpath")
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode="empty")
            files_info = reader.files_info(relpaths)

            for t in tests:
                if not matches_filters(t):
                    continue
                if "referenced-test" in t:
                    # Avoid double-counting reftests: disregard reference file entries
                    continue

                for key in t:
                    if key in known_unconditional_annotations:
                        annotation_count += 1
                    if key in known_conditional_annotations:
                        annotation_count += 1
                        # Here 'key' is a manifest annotation type like 'skip-if' and t[key]
                        # is the associated condition. For example, the manifestparser
                        # manifest annotation, "skip-if = os == 'win'", is expected to be
                        # encoded as t['skip-if'] = "os == 'win'".
                        # To allow for reftest manifests, t[key] may have multiple entries
                        # separated by ';', each corresponding to a condition for that test
                        # and annotation type. For example,
                        # "skip-if(Android&&webrender) skip-if(OSX)", would be
                        # encoded as t['skip-if'] = "Android&&webrender;OSX".
                        annotation_conditions = t[key].split(";")

                        # if key has \n in it, we need to strip it. for manifestparser format
                        # 1) from the beginning of the line
                        # 2) different conditions if in the middle of the line
                        annotation_conditions = [
                            x.strip("\n") for x in annotation_conditions
                        ]
                        temp = []
                        for condition in annotation_conditions:
                            temp.extend(condition.split("\n"))
                        annotation_conditions = temp

                        for condition in annotation_conditions:
                            condition_count += 1
                            # Trim reftest fuzzy-if ranges: everything after the first comma
                            # eg. "Android,0-2,1-3" -> "Android"
                            condition = condition.split(",")[0]
                            if condition not in conditions:
                                conditions[condition] = 0
                            conditions[condition] += 1
                relpath = t.get("srcdir_relpath")
                if relpath in files_info:
                    bug_component = files_info[relpath].get("BUG_COMPONENT")
                    if bug_component:
                        key = "{}::{}".format(
                            bug_component.product, bug_component.component
                        )
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        component_set.add(key)
                        test_info = {"test": relpath}
                        for test_key in display_keys:
                            value = t.get(test_key)
                            if value:
                                test_info[test_key] = value
                        if t.get("fails-if"):
                            failed_count += 1
                        if "manifest_relpath" in t and "manifest" in t:
                            if "web-platform" in t["manifest_relpath"]:
                                test_info["manifest"] = [t["manifest"]]
                            else:
                                test_info["manifest"] = [t["manifest_relpath"]]

                        # handle included manifests as ancestor:child
                        if t.get("ancestor_manifest", None):
                            test_info["manifest"] = [
                                "%s:%s"
                                % (t["ancestor_manifest"], test_info["manifest"][0])
                            ]

                        # add in intermittent failure data
                        if relpath in ifd:
                            if_data = ifd.get(relpath)
                            test_info["failure_count"] = if_data["count"]

                        if show_testruns:
                            total_runs = 0
                            for m in test_info["manifest"]:
                                if m in runcount.keys():
                                    for x in runcount.get(m, []):
                                        total_runs += x[-1]
                            test_info["total_runs"] = total_runs
                        rkey = key if show_components else "all"
                        if rkey in by_component["tests"]:
                            # Avoid duplicates: Some test paths have multiple TestResolver
                            # entries, as when a test is included by multiple manifests.
                            found = False
                            for ctest in by_component["tests"][rkey]:
                                if ctest["test"] == test_info["test"]:
                                    found = True
                                    break
                            if not found:
                                by_component["tests"][rkey].append(test_info)
                            else:
                                for ti in by_component["tests"][rkey]:
                                    if ti["test"] == test_info["test"]:
                                        if (
                                            test_info["manifest"][0]
                                            not in ti["manifest"]
                                        ):
                                            ti_manifest = test_info["manifest"]
                                            if test_info.get(
                                                "ancestor_manifest", None
                                            ):
                                                ti_manifest = "%s:%s" % (
                                                    test_info["ancestor_manifest"],
                                                    ti_manifest,
                                                )
                                            ti["manifest"].extend(ti_manifest)
                        else:
                            by_component["tests"][rkey] = [test_info]

            for key in by_component["tests"]:
                by_component["tests"][key].sort(key=lambda k: k["test"])

        by_component["description"] = self.description(
        if show_summary:
            by_component["summary"] = {}
            by_component["summary"]["components"] = len(component_set)
            by_component["summary"]["manifests"] = manifest_count
            by_component["summary"]["tests"] = test_count
            by_component["summary"]["failed tests"] = failed_count
            by_component["summary"]["skipped tests"] = skipped_count

        if show_annotations:
            by_component["annotations"] = {}
            by_component["annotations"]["total annotations"] = annotation_count
            by_component["annotations"]["total conditions"] = condition_count
            by_component["annotations"]["unique conditions"] = len(conditions)
            by_component["annotations"]["conditions"] = conditions

        self.write_report(by_component, output_file)

        end_time = datetime.datetime.now()
        self.log_verbose(
            "%d seconds total to generate report"
            % (end_time - start_time).total_seconds()
        )
    def write_report(self, by_component, output_file):
        json_report = json.dumps(by_component, indent=2, sort_keys=True)
        if output_file:
            output_file = os.path.abspath(output_file)
            output_dir = os.path.dirname(output_file)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)

            with open(output_file, "w") as f:
                f.write(json_report)
        else:
            print(json_report)
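
    # For example (illustrative): write_report(report, "report/test-info.json")
    # creates the "report" directory if needed and writes the JSON there;
    # without an output_file the JSON is printed to stdout.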
    def report_diff(self, before, after, output_file):
        """
        Support for 'mach test-info report-diff'.
        """

        def get_file(path_or_url):
            if urlparse.urlparse(path_or_url).scheme:
                response = requests.get(path_or_url)
                response.raise_for_status()
                return json.loads(response.text)
            with open(path_or_url) as f:
                return json.load(f)

        report1 = get_file(before)
        report2 = get_file(after)

        by_component = {"tests": {}, "summary": {}}
        self.diff_summaries(by_component, report1["summary"], report2["summary"])
        self.diff_all_components(by_component, report1["tests"], report2["tests"])
        self.write_report(by_component, output_file)
    def diff_summaries(self, by_component, summary1, summary2):
        """
        Update by_component with comparison of summaries.
        """
        all_keys = set(summary1.keys()) | set(summary2.keys())
        for key in all_keys:
            delta = summary2.get(key, 0) - summary1.get(key, 0)
            by_component["summary"]["%s delta" % key] = delta
    def diff_all_components(self, by_component, tests1, tests2):
        """
        Update by_component with any added/deleted tests, for all components.
        """
        self.added_count = 0
        self.deleted_count = 0
        for component in tests1:
            component1 = tests1[component]
            component2 = [] if component not in tests2 else tests2[component]
            self.diff_component(by_component, component, component1, component2)
        for component in tests2:
            if component not in tests1:
                component2 = tests2[component]
                self.diff_component(by_component, component, [], component2)
        by_component["summary"]["added tests"] = self.added_count
        by_component["summary"]["deleted tests"] = self.deleted_count
    def diff_component(self, by_component, component, component1, component2):
        """
        Update by_component[component] with any added/deleted tests for the
        specified component:
          "added": tests found in component2 but missing from component1.
          "deleted": tests found in component1 but missing from component2.
        """
        tests1 = set([t["test"] for t in component1])
        tests2 = set([t["test"] for t in component2])
        deleted = tests1 - tests2
        added = tests2 - tests1
        if deleted or added:
            by_component["tests"][component] = {}
            if deleted:
                by_component["tests"][component]["deleted"] = sorted(list(deleted))
            if added:
                by_component["tests"][component]["added"] = sorted(list(added))
        self.added_count += len(added)
        self.deleted_count += len(deleted)
        common = len(tests1.intersection(tests2))
        self.log_verbose(
            "%s: %d deleted, %d added, %d common"
            % (component, len(deleted), len(added), common)
        )
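
    # For example (illustrative): if component1 contains tests {a, b} and
    # component2 contains {b, c}, the component entry becomes
    #   {"deleted": ["a"], "added": ["c"]}
    # and the verbose log reports "1 deleted, 1 added, 1 common".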