# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import, division, print_function

import datetime
import errno
import json
import os
import posixpath
import re
import subprocess

import requests
import six.moves.urllib_parse as urlparse

import mozpack.path as mozpath
from moztest.resolve import TestResolver, TestManifestLoader
from mozfile import which

from mozbuild.base import MozbuildObject, MachCommandConditions as conditions

REFERER = "https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Test-Info"


class TestInfo(object):
    """
    Support 'mach test-info'.
    """

    def __init__(self, verbose):
        self.verbose = verbose
        here = os.path.abspath(os.path.dirname(__file__))
        self.build_obj = MozbuildObject.from_environment(cwd=here)

    def log_verbose(self, what):
        # Print extra progress detail only when verbose output was requested.
        if self.verbose:
            print(what)


class TestInfoTests(TestInfo):
    """
    Support 'mach test-info tests': Detailed report of specified tests.
    """

    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)

        self._hg = None
        if conditions.is_hg(self.build_obj):
            self._hg = which("hg")
            if not self._hg:
                raise OSError(errno.ENOENT, "Could not find 'hg' on PATH.")

        self._git = None
        if conditions.is_git(self.build_obj):
            self._git = which("git")
            if not self._git:
                raise OSError(errno.ENOENT, "Could not find 'git' on PATH.")

    def find_in_hg_or_git(self, test_name):
        if self._hg:
            cmd = [self._hg, "files", "-I", test_name]
        elif self._git:
            cmd = [self._git, "ls-files", test_name]
        else:
            return None
        try:
            out = subprocess.check_output(cmd, universal_newlines=True).splitlines()
        except subprocess.CalledProcessError:
            out = None
        return out

    def set_test_name(self):
        # Generating a unified report for a specific test is complicated
        # by differences in the test name used in various data sources:
        #  - It is often convenient to request a report based only on
        #    a short file name, rather than the full path;
        #  - Bugs may be filed in bugzilla against a simple, short test
        #    name or the full path to the test.
        # This function attempts to find appropriate names for different
        # queries based on the specified test name.
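        # For example (hypothetical values): a request for "test_audio.html"
        # might yield full_test_name "dom/media/test/test_audio.html" and
        # leave short_name unset, since the short name matches the request.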

        # full_test_name is full path to file in hg (or git)
        self.full_test_name = None
        out = self.find_in_hg_or_git(self.test_name)
        if out and len(out) == 1:
            self.full_test_name = out[0]
        elif out and len(out) > 1:
            print("Ambiguous test name specified. Found:")
            for line in out:
                print(line)
        else:
            out = self.find_in_hg_or_git("**/%s*" % self.test_name)
            if out and len(out) == 1:
                self.full_test_name = out[0]
            elif out and len(out) > 1:
                print("Ambiguous test name. Found:")
                for line in out:
                    print(line)
        if self.full_test_name:
            self.full_test_name = self.full_test_name.replace(os.sep, posixpath.sep)
            print("Found %s in source control." % self.full_test_name)
        else:
            print("Unable to validate test name '%s'!" % self.test_name)
            self.full_test_name = self.test_name

        # search for full_test_name in test manifests
        here = os.path.abspath(os.path.dirname(__file__))
        resolver = TestResolver.from_environment(
            cwd=here, loader_cls=TestManifestLoader
        )
        relpath = self.build_obj._wrap_path_argument(self.full_test_name).relpath()
        tests = list(resolver.resolve_tests(paths=[relpath]))
        if len(tests) == 1:
            relpath = self.build_obj._wrap_path_argument(tests[0]["manifest"]).relpath()
            print("%s found in manifest %s" % (self.full_test_name, relpath))
            if tests[0].get("flavor"):
                print(" flavor: %s" % tests[0]["flavor"])
            if tests[0].get("skip-if"):
                print(" skip-if: %s" % tests[0]["skip-if"])
            if tests[0].get("fail-if"):
                print(" fail-if: %s" % tests[0]["fail-if"])
        elif len(tests) == 0:
            print("%s not found in any test manifest!" % self.full_test_name)
        else:
            print("%s found in more than one manifest!" % self.full_test_name)

        # short_name is full_test_name without path
        self.short_name = None
        name_idx = self.full_test_name.rfind("/")
        if name_idx > 0:
            self.short_name = self.full_test_name[name_idx + 1 :]
        if self.short_name and self.short_name == self.test_name:
            self.short_name = None

    def get_platform(self, record):
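        # Example (hypothetical record): {"build": {"platform": "windows10-64",
        # "type": "opt"}, "run": {"type": ["e10s"]}} would be summarized as
        # "windows10-64/opt:".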
        if "platform" in record["build"]:
            platform = record["build"]["platform"]
        else:
            platform = "-"  # assumed placeholder when no platform is reported
        platform_words = platform.split("-")
        types_label = ""
        # combine run and build types and eliminate duplicates
        run_types = []
        if "run" in record and "type" in record["run"]:
            run_types = record["run"]["type"]
            run_types = run_types if isinstance(run_types, list) else [run_types]
        build_types = []
        if "build" in record and "type" in record["build"]:
            build_types = record["build"]["type"]
            build_types = (
                build_types if isinstance(build_types, list) else [build_types]
            )
        run_types = list(set(run_types + build_types))
        # '1proc' is used as a treeherder label but does not appear in run types
        if "e10s" not in run_types:
            run_types = run_types + ["1proc"]
        for run_type in run_types:
            # chunked is not interesting
            if run_type == "chunked":
                continue
            # e10s is the default: implied
            if run_type == "e10s":
                continue
            # sometimes a build/run type is already present in the build platform
            if run_type in platform_words:
                continue
            if types_label:
                types_label += "-"
            types_label += run_type
        return "%s/%s:" % (platform, types_label)

    def report_bugs(self):
        # Report open bugs matching test name
        search = self.full_test_name
        if self.test_name:
            search = "%s,%s" % (search, self.test_name)
        if self.short_name:
            search = "%s,%s" % (search, self.short_name)
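        # For example (hypothetical), search might be
        # "dom/media/test/test_audio.html,test_audio.html" when a short test
        # name was requested and resolved to a full path.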
        payload = {"quicksearch": search, "include_fields": "id,summary"}
        response = requests.get("https://bugzilla.mozilla.org/rest/bug", payload)
        response.raise_for_status()
        json_response = response.json()
        print("\nBugzilla quick search for '%s':" % search)
        if "bugs" in json_response:
            for bug in json_response["bugs"]:
                print("Bug %s: %s" % (bug["id"], bug["summary"]))
        else:
            print("No bugs found.")

    # NOTE: the parameter list below is assumed from how the values are used
    # in this method; the original signature may differ.
    def report(self, test_names, show_info, show_bugs):
        self.show_info = show_info

        if not self.show_info and not show_bugs:
            # by default, show everything
            self.show_info = True

        for test_name in test_names:
            print("===== %s =====" % test_name)
            self.test_name = test_name
            if len(self.test_name) < 6:
                print("'%s' is too short for a test name!" % self.test_name)
                continue
            self.set_test_name()
            if show_bugs:
                self.report_bugs()


class TestInfoReport(TestInfo):
    """
    Support 'mach test-info report': Report of test runs summarized by
    manifest and component.
    """
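
    # Hypothetical usage sketch (in practice this class is driven by the
    # 'mach test-info' commands):
    #   ti = TestInfoReport(verbose=True)
    #   ti.report_diff("before.json", "after.json", "diff.json")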

    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)

    def update_report(self, by_component, result, path_mod):
        def update_item(item, label, value):
            # It is important to include any existing item value in case ActiveData
            # returns multiple records for the same test; that can happen if the report
            # sometimes maps more than one ActiveData record to the same path.
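            # For example (hypothetical), if two records for the same test each
            # carry a count of 2, "total runs" accumulates to 4.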
            new_value = item.get(label, 0) + value
            if type(new_value) == int:
                item[label] = new_value
            else:
                item[label] = float(round(new_value, 2))  # pylint: disable=W1633

        if "test" in result and "tests" in by_component:
            test = result["test"]
            if path_mod:
                test = path_mod(test)
            for bc in by_component["tests"]:
                for item in by_component["tests"][bc]:
                    if test == item["test"]:
                        # pylint: disable=W1633
                        seconds = float(round(result.get("duration", 0), 2))
                        update_item(item, "total run time, seconds", seconds)
                        update_item(item, "total runs", result.get("count", 0))
                        update_item(item, "skipped runs", result.get("skips", 0))
                        update_item(item, "failed runs", result.get("failures", 0))

    def path_mod_reftest(self, path):
        # "<path1> == <path2>" -> "<path1>"
        path = path.split(" ")[0]
        # "<path>?<params>" -> "<path>"
        path = path.split("?")[0]
        # "<path>#<fragment>" -> "<path>"
        path = path.split("#")[0]
        return path

    def path_mod_jsreftest(self, path):
        # "<path>;assert" -> "<path>"
        path = path.split(";")[0]
        return path

    def path_mod_marionette(self, path):
        # "<path> <test-name>" -> "<path>"
        path = path.split(" ")[0]
        # "part1\part2" -> "part1/part2"
        path = path.replace("\\", os.path.sep)
        return path

    def path_mod_wpt(self, path):
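        # Example (hypothetical, assuming POSIX separators):
        #   "/css/foo.html?a=b" -> "testing/web-platform/tests/css/foo.html"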
        if path[0] == os.path.sep:
            # "/<path>" -> "<path>"
            path = path[1:]
        # "<path>" -> "testing/web-platform/tests/<path>"
        path = os.path.join("testing", "web-platform", "tests", path)
        # "<path>?<params>" -> "<path>"
        path = path.split("?")[0]
        return path

    def path_mod_jittest(self, path):
        # "part1\part2" -> "part1/part2"
        path = path.replace("\\", os.path.sep)
        # "<path>" -> "js/src/jit-test/tests/<path>"
        return os.path.join("js", "src", "jit-test", "tests", path)

    def path_mod_xpcshell(self, path):
        # <manifest>.ini:<path> -> "<path>"
        path = path.split(".ini:")[-1]
        return path

    # NOTE: the parameter list below is assumed from how the values are used
    # in this method; the original signature may differ.
    def description(self, components, flavor, subsuite, paths, show_manifests,
                    show_tests, show_summary, show_annotations, filter_values,
                    filter_keys):
        # provide a natural language description of the report options
        what = []
        if show_manifests:
            what.append("test manifests")
        if show_annotations:
            what.append("test manifest annotations")
        if show_summary and len(what) == 0:
            what.append("summary of tests only")
        if len(what) > 1:
            what[-1] = "and " + what[-1]
        what = ", ".join(what)
        d = "Test summary report for " + what
        if components:
            d += ", in specified components (%s)" % components
        else:
            d += ", in all components"
        if flavor:
            d += ", in specified flavor (%s)" % flavor
        if subsuite:
            d += ", in specified subsuite (%s)" % subsuite
        if paths:
            d += ", on specified paths (%s)" % paths
        if filter_values:
            d += ", containing '%s'" % filter_values
            if filter_keys:
                d += " in manifest keys '%s'" % filter_keys
            else:
                d += " in any part of manifest entry"
        d += " as of %s." % datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
        return d

    # NOTE: the parameter list below is assumed from how the values are used
    # in this method; the original signature may differ.
    def report(self, components, flavor, subsuite, paths, show_manifests,
               show_tests, show_summary, show_annotations, filter_values,
               filter_keys, show_components, output_file):
        def matches_filters(test):
            """
            Return True if all of the requested filter_values are found in this test;
            if filter_keys are specified, restrict search to those test keys.
            """
            for value in filter_values:
                value_found = False
                for key in test:
                    if not filter_keys or key in filter_keys:
                        if re.search(value, test[key]):
                            value_found = True
                            break
                if not value_found:
                    return False
            return True

        start_time = datetime.datetime.now()

        # Ensure useful report by default
        if (
            not show_manifests
            and not show_tests
            and not show_summary
            and not show_annotations
        ):
            show_manifests = True

        if components:
            components = components.split(",")
        if filter_keys:
            filter_keys = filter_keys.split(",")
        if filter_values:
            filter_values = filter_values.split(",")
        else:
            filter_values = []
        display_keys = (filter_keys or []) + ["skip-if", "fail-if", "fails-if"]
        display_keys = set(display_keys)

        print("Finding tests...")
        here = os.path.abspath(os.path.dirname(__file__))
        resolver = TestResolver.from_environment(
            cwd=here, loader_cls=TestManifestLoader
        )
        tests = list(
            resolver.resolve_tests(paths=paths, flavor=flavor, subsuite=subsuite)
        )

        manifest_paths = set()
        for t in tests:
            if "manifest" in t and t["manifest"] is not None:
                manifest_paths.add(t["manifest"])
        manifest_count = len(manifest_paths)
        print(
            "Resolver found {} tests, {} manifests".format(len(tests), manifest_count)
        )

        by_component = {}
        if show_manifests:
            topsrcdir = self.build_obj.topsrcdir
            by_component["manifests"] = {}
            manifest_paths = list(manifest_paths)
            manifest_paths.sort()
            relpaths = []
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode="empty")
            files_info = reader.files_info(relpaths)
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                manifest_info = None
                if relpath in files_info:
                    bug_component = files_info[relpath].get("BUG_COMPONENT")
                    if bug_component:
                        key = "{}::{}".format(
                            bug_component.product, bug_component.component
                        )
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        manifest_info = {"manifest": relpath, "tests": 0, "skipped": 0}
                        rkey = key if show_components else "all"
                        if rkey in by_component["manifests"]:
                            by_component["manifests"][rkey].append(manifest_info)
                        else:
                            by_component["manifests"][rkey] = [manifest_info]
                if manifest_info:
                    for t in tests:
                        if t["manifest"] == manifest_path:
                            manifest_info["tests"] += 1
                            if t.get("skip-if"):
                                manifest_info["skipped"] += 1
            for key in by_component["manifests"]:
                by_component["manifests"][key].sort(key=lambda k: k["manifest"])

        if show_tests:
            by_component["tests"] = {}

        if show_tests or show_summary or show_annotations:
            test_count = failed_count = skipped_count = 0
            annotation_count = condition_count = 0
            component_set = set()
            relpaths = []
            conditions = {}
            known_unconditional_annotations = ["skip", "fail", "asserts", "random"]
            known_conditional_annotations = [  # assumed set of conditional annotations
                "skip-if", "fail-if", "run-if", "fails-if",
                "fuzzy-if", "random-if", "asserts-if",
            ]
            for t in tests:
                relpath = t.get("srcdir_relpath")
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode="empty")
            files_info = reader.files_info(relpaths)
            for t in tests:
                if not matches_filters(t):
                    continue
                if "referenced-test" in t:
                    # Avoid double-counting reftests: disregard reference file entries
                    continue
                if show_annotations:
                    for key in t:
                        if key in known_unconditional_annotations:
                            annotation_count += 1
                        if key in known_conditional_annotations:
                            annotation_count += 1
                            # Here 'key' is a manifest annotation type like 'skip-if' and t[key]
                            # is the associated condition. For example, the manifestparser
                            # manifest annotation, "skip-if = os == 'win'", is expected to be
                            # encoded as t['skip-if'] = "os == 'win'".
                            # To allow for reftest manifests, t[key] may have multiple entries
                            # separated by ';', each corresponding to a condition for that test
                            # and annotation type. For example,
                            # "skip-if(Android&&webrender) skip-if(OSX)" would be
                            # encoded as t['skip-if'] = "Android&&webrender;OSX".
                            annotation_conditions = t[key].split(";")
                            for condition in annotation_conditions:
                                condition_count += 1
                                # Trim reftest fuzzy-if ranges: everything after the first comma
                                # eg. "Android,0-2,1-3" -> "Android"
                                condition = condition.split(",")[0]
                                if condition not in conditions:
                                    conditions[condition] = 0
                                conditions[condition] += 1
                test_count += 1
                relpath = t.get("srcdir_relpath")
                if relpath in files_info:
                    bug_component = files_info[relpath].get("BUG_COMPONENT")
                    if bug_component:
                        key = "{}::{}".format(
                            bug_component.product, bug_component.component
                        )
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        component_set.add(key)
                        test_info = {"test": relpath}
                        for test_key in display_keys:
                            value = t.get(test_key)
                            if value:
                                test_info[test_key] = value
                        if t.get("fails-if"):
                            failed_count += 1
                        if t.get("skip-if"):
                            skipped_count += 1
                        if show_tests:
                            rkey = key if show_components else "all"
                            if rkey in by_component["tests"]:
                                # Avoid duplicates: Some test paths have multiple TestResolver
                                # entries, as when a test is included by multiple manifests.
                                found = False
                                for ctest in by_component["tests"][rkey]:
                                    if ctest["test"] == test_info["test"]:
                                        found = True
                                        break
                                if not found:
                                    by_component["tests"][rkey].append(test_info)
                            else:
                                by_component["tests"][rkey] = [test_info]
            if show_tests:
                for key in by_component["tests"]:
                    by_component["tests"][key].sort(key=lambda k: k["test"])

        by_component["description"] = self.description(
            components, flavor, subsuite, paths, show_manifests, show_tests,
            show_summary, show_annotations, filter_values, filter_keys
        )

        if show_summary:
            by_component["summary"] = {}
            by_component["summary"]["components"] = len(component_set)
            by_component["summary"]["manifests"] = manifest_count
            by_component["summary"]["tests"] = test_count
            by_component["summary"]["failed tests"] = failed_count
            by_component["summary"]["skipped tests"] = skipped_count

        if show_annotations:
            by_component["annotations"] = {}
            by_component["annotations"]["total annotations"] = annotation_count
            by_component["annotations"]["total conditions"] = condition_count
            by_component["annotations"]["unique conditions"] = len(conditions)
            by_component["annotations"]["conditions"] = conditions

        self.write_report(by_component, output_file)

        end_time = datetime.datetime.now()
        self.log_verbose(
            "%d seconds total to generate report"
            % (end_time - start_time).total_seconds()
        )

    def write_report(self, by_component, output_file):
        json_report = json.dumps(by_component, indent=2, sort_keys=True)
        if output_file:
            output_file = os.path.abspath(output_file)
            output_dir = os.path.dirname(output_file)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)

            with open(output_file, "w") as f:
                f.write(json_report)
        else:
            # no output file requested; emit the report to stdout
            print(json_report)

    def report_diff(self, before, after, output_file):
        """
        Support for 'mach test-info report-diff'.
        """
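        # 'before' and 'after' may be local file paths or URLs to previously
        # generated reports; get_file() below handles both.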

        def get_file(path_or_url):
            if urlparse.urlparse(path_or_url).scheme:
                response = requests.get(path_or_url)
                response.raise_for_status()
                return json.loads(response.text)
            with open(path_or_url) as f:
                return json.load(f)

        report1 = get_file(before)
        report2 = get_file(after)

        by_component = {"tests": {}, "summary": {}}
        self.diff_summaries(by_component, report1["summary"], report2["summary"])
        self.diff_all_components(by_component, report1["tests"], report2["tests"])
        self.write_report(by_component, output_file)

    def diff_summaries(self, by_component, summary1, summary2):
        """
        Update by_component with comparison of summaries.
        """
        all_keys = set(summary1.keys()) | set(summary2.keys())
        for key in all_keys:
            delta = summary2.get(key, 0) - summary1.get(key, 0)
            by_component["summary"]["%s delta" % key] = delta
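        # For example (hypothetical), summary1 {"tests": 100} and summary2
        # {"tests": 120} produce by_component["summary"]["tests delta"] == 20.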

    def diff_all_components(self, by_component, tests1, tests2):
        """
        Update by_component with any added/deleted tests, for all components.
        """
        self.added_count = 0
        self.deleted_count = 0
        for component in tests1:
            component1 = tests1[component]
            component2 = [] if component not in tests2 else tests2[component]
            self.diff_component(by_component, component, component1, component2)
        for component in tests2:
            if component not in tests1:
                component2 = tests2[component]
                self.diff_component(by_component, component, [], component2)
        by_component["summary"]["added tests"] = self.added_count
        by_component["summary"]["deleted tests"] = self.deleted_count

    def diff_component(self, by_component, component, component1, component2):
        """
        Update by_component[component] with any added/deleted tests for the
        specified component.
        "added": tests found in component2 but missing from component1.
        "deleted": tests found in component1 but missing from component2.
        """
        tests1 = set([t["test"] for t in component1])
        tests2 = set([t["test"] for t in component2])
        deleted = tests1 - tests2
        added = tests2 - tests1
        if deleted or added:
            by_component["tests"][component] = {}
            if deleted:
                by_component["tests"][component]["deleted"] = sorted(list(deleted))
            if added:
                by_component["tests"][component]["added"] = sorted(list(added))
        self.added_count += len(added)
        self.deleted_count += len(deleted)
        common = len(tests1.intersection(tests2))
        self.log_verbose(
            "%s: %d deleted, %d added, %d common"
            % (component, len(deleted), len(added), common)
        )