# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from __future__ import absolute_import, print_function

import datetime
import errno
import json
import os
import posixpath
import re
import subprocess
import threading
import traceback

import requests
import six.moves.urllib_parse as urlparse

import mozpack.path as mozpath
from mozbuild.base import MozbuildObject, MachCommandConditions as conditions
from mozfile import which
from moztest.resolve import TestResolver, TestManifestLoader

ACTIVEDATA_RECORD_LIMIT = 10000
MAX_ACTIVEDATA_CONCURRENCY = 5
MAX_ACTIVEDATA_RETRIES = 5
REFERER = 'https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Test-Info'


class TestInfo(object):
    """
    Support 'mach test-info'.
    """
    def __init__(self, verbose):
        self.verbose = verbose
        here = os.path.abspath(os.path.dirname(__file__))
        self.build_obj = MozbuildObject.from_environment(cwd=here)
        self.total_activedata_seconds = 0

    def log_verbose(self, what):
        if self.verbose:
            print(what)

    def activedata_query(self, query):
        start_time = datetime.datetime.now()
        self.log_verbose(start_time)
        self.log_verbose(json.dumps(query))
        response = requests.post("http://activedata.allizom.org/query",
                                 data=json.dumps(query),
                                 headers={'referer': REFERER},
                                 stream=True)
        end_time = datetime.datetime.now()
        self.total_activedata_seconds += (end_time - start_time).total_seconds()
        self.log_verbose(end_time)
        self.log_verbose(response)
        response.raise_for_status()
        data = response.json()["data"]
        self.log_verbose("response length: %d" % len(data))
        return data


class ActiveDataThread(threading.Thread):
    """
    A thread to query ActiveData and wait for its response.
    """
    def __init__(self, name, ti, query, context):
        threading.Thread.__init__(self, name=name)
        self.ti = ti
        self.query = query
        self.context = context
        self.response = None

    def run(self):
        attempt = 1
        while attempt < MAX_ACTIVEDATA_RETRIES and not self.response:
            try:
                self.response = self.ti.activedata_query(self.query)
                if not self.response:
                    self.ti.log_verbose("%s: no data received for query" % self.name)
                    self.response = []
                    break
            except Exception:
                self.ti.log_verbose("%s: Exception on attempt #%d:" % (self.name, attempt))
                traceback.print_exc()
                attempt += 1
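
# A note on usage: ActiveDataThread instances are driven by
# TestInfoReport.collect_activedata_results below, which starts at most
# MAX_ACTIVEDATA_CONCURRENCY threads at a time, polls each with join(1),
# and reads a thread's .response once the thread has finished.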


class TestInfoTests(TestInfo):
    """
    Support 'mach test-info tests': Detailed report of specified tests.
    """
    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)

        self._hg = None
        if conditions.is_hg(self.build_obj):
            self._hg = which('hg')
            if not self._hg:
                raise OSError(errno.ENOENT, "Could not find 'hg' on PATH.")

        self._git = None
        if conditions.is_git(self.build_obj):
            self._git = which('git')
            if not self._git:
                raise OSError(errno.ENOENT, "Could not find 'git' on PATH.")

    def find_in_hg_or_git(self, test_name):
        if self._hg:
            cmd = [self._hg, 'files', '-I', test_name]
        elif self._git:
            cmd = [self._git, 'ls-files', test_name]
        else:
            return None
        try:
            out = subprocess.check_output(cmd, universal_newlines=True).splitlines()
        except subprocess.CalledProcessError:
            out = None
        return out
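
    # For example, find_in_hg_or_git('**/test_foo.html*') runs
    # "hg files -I **/test_foo.html*" (or "git ls-files **/test_foo.html*")
    # and returns the matching tracked paths, one per list element; the
    # test name used here is only illustrative.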

    def set_test_name(self):
        # Generating a unified report for a specific test is complicated
        # by differences in the test name used in various data sources.
        # Consider:
        #   - It is often convenient to request a report based only on
        #     a short file name, rather than the full path;
        #   - Bugs may be filed in bugzilla against a simple, short test
        #     name or the full path to the test;
        #   - In ActiveData, the full path is usually used, but sometimes
        #     also includes additional path components outside of the
        #     mercurial repo (common for reftests).
        # This function attempts to find appropriate names for different
        # queries based on the specified test name.

        # full_test_name is the full path to the file in hg (or git)
        self.full_test_name = None
        out = self.find_in_hg_or_git(self.test_name)
        if out and len(out) == 1:
            self.full_test_name = out[0]
        elif out and len(out) > 1:
            print("Ambiguous test name specified. Found:")
            for line in out:
                print(line)
        else:
            out = self.find_in_hg_or_git('**/%s*' % self.test_name)
            if out and len(out) == 1:
                self.full_test_name = out[0]
            elif out and len(out) > 1:
                print("Ambiguous test name. Found:")
                for line in out:
                    print(line)
        if self.full_test_name:
            self.full_test_name = self.full_test_name.replace(os.sep, posixpath.sep)
            print("Found %s in source control." % self.full_test_name)
        else:
            print("Unable to validate test name '%s'!" % self.test_name)
            self.full_test_name = self.test_name

        # search for full_test_name in test manifests
        here = os.path.abspath(os.path.dirname(__file__))
        resolver = TestResolver.from_environment(cwd=here, loader_cls=TestManifestLoader)
        relpath = self.build_obj._wrap_path_argument(self.full_test_name).relpath()
        tests = list(resolver.resolve_tests(paths=[relpath]))
        if len(tests) == 1:
            relpath = self.build_obj._wrap_path_argument(tests[0]['manifest']).relpath()
            print("%s found in manifest %s" % (self.full_test_name, relpath))
            if tests[0].get('flavor'):
                print("  flavor: %s" % tests[0]['flavor'])
            if tests[0].get('skip-if'):
                print("  skip-if: %s" % tests[0]['skip-if'])
            if tests[0].get('fail-if'):
                print("  fail-if: %s" % tests[0]['fail-if'])
        elif len(tests) == 0:
            print("%s not found in any test manifest!" % self.full_test_name)
        else:
            print("%s found in more than one manifest!" % self.full_test_name)

        # short_name is full_test_name without the path
        self.short_name = None
        name_idx = self.full_test_name.rfind('/')
        if name_idx > 0:
            self.short_name = self.full_test_name[name_idx + 1:]
        if self.short_name and self.short_name == self.test_name:
            self.short_name = None

        if not (self.show_results or self.show_durations or self.show_tasks):
            # no need to determine the ActiveData name if not querying
            return

    def set_activedata_test_name(self):
        # activedata_test_name is the test name as it appears in ActiveData
        self.activedata_test_name = None
        simple_names = [
            self.full_test_name,
            self.test_name,
            self.short_name,
        ]
        simple_names = [x for x in simple_names if x]
        searches = [
            {"in": {"result.test": simple_names}},
        ]
        regex_names = [".*%s.*" % re.escape(x) for x in simple_names if x]
        for r in regex_names:
            searches.append({"regexp": {"result.test": r}})
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 10,
            "groupby": ["result.test"],
            "where": {"and": [
                {"or": searches},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        print("Querying ActiveData...")  # the following query can take a long time
        data = self.activedata_query(query)
        if data and len(data) > 0:
            self.activedata_test_name = [
                d['result']['test']
                for p in simple_names + regex_names
                for d in data
                if re.match(p + "$", d['result']['test'])
            ][0]  # first match is the best match
        if self.activedata_test_name:
            print("Found records matching '%s' in ActiveData." %
                  self.activedata_test_name)
        else:
            print("Unable to find matching records in ActiveData; using %s!" %
                  self.test_name)
            self.activedata_test_name = self.test_name

    def get_platform(self, record):
        if 'platform' in record['build']:
            platform = record['build']['platform']
        else:
            platform = "-"
        platform_words = platform.split('-')
        types_label = ""
        # combine run and build types and eliminate duplicates
        run_types = []
        if 'run' in record and 'type' in record['run']:
            run_types = record['run']['type']
            run_types = run_types if isinstance(run_types, list) else [run_types]
        build_types = []
        if 'build' in record and 'type' in record['build']:
            build_types = record['build']['type']
            build_types = build_types if isinstance(build_types, list) else [build_types]
        run_types = list(set(run_types + build_types))
        # '1proc' is used as a treeherder label but does not appear in run types
        if 'e10s' not in run_types:
            run_types = run_types + ['1proc']
        for run_type in run_types:
            # chunked is not interesting
            if run_type == 'chunked':
                continue
            # e10s is the default: implied
            if run_type == 'e10s':
                continue
            # sometimes a build/run type is already present in the build platform
            if run_type in platform_words:
                continue
            if types_label:
                types_label += "-"
            types_label += run_type
        return "%s/%s:" % (platform, types_label)
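
    # For illustration: a record with build.platform "linux64", build.type "opt",
    # and run.type "e10s" produces the label "linux64/opt:" -- "e10s" is implied
    # and dropped, "chunked" would be dropped, and a type already present in the
    # platform string (e.g. "asan" in "linux64-asan") is not repeated.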

    def report_test_results(self):
        # Report test pass/fail summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 100,
            "groupby": ["build.platform", "build.type"],
            "select": [
                {"aggregate": "count"},
                {
                    "name": "failures",
                    "value": {"case": [
                        {"when": {"eq": {"result.ok": "F"}}, "then": 1}
                    ]},
                    "aggregate": "sum",
                    "default": 0
                },
                {
                    "name": "skips",
                    "value": {"case": [
                        {"when": {"eq": {"result.status": "SKIP"}}, "then": 1}
                    ]},
                    "aggregate": "sum",
                    "default": 0
                },
                {"value": "run.type", "aggregate": "union"}
            ],
            "where": {"and": [
                {"eq": {"result.test": self.activedata_test_name}},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        print("\nTest results for %s on %s between %s and %s" %
              (self.activedata_test_name, self.branches, self.start, self.end))
        data = self.activedata_query(query)
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            worst_rate = 0.0
            worst_platform = None
            worst_failures = 0
            worst_runs = 0
            total_runs = 0
            total_failures = 0
            for record in data:
                platform = self.get_platform(record)
                if platform.startswith("-"):
                    continue
                runs = record['count']
                total_runs = total_runs + runs
                failures = record.get('failures', 0)
                skips = record.get('skips', 0)
                total_failures = total_failures + failures
                rate = float(failures) / runs
                if rate >= worst_rate:
                    worst_rate = rate
                    worst_platform = platform
                    worst_failures = failures
                    worst_runs = runs
                print("%-40s %6d failures (%6d skipped) in %6d runs" % (
                    platform, failures, skips, runs))
            print("\nTotal: %d failures in %d runs or %.3f failures/run" %
                  (total_failures, total_runs, float(total_failures) / total_runs))
            if worst_failures > 0:
                print("Worst rate on %s %d failures in %d runs or %.3f failures/run" %
                      (worst_platform, worst_failures, worst_runs, worst_rate))
        else:
            print("No test result data found.")

    def report_test_durations(self):
        # Report test durations summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 100,
            "groupby": ["build.platform", "build.type"],
            "select": [
                {"value": "result.duration",
                 "aggregate": "average", "name": "average"},
                {"value": "result.duration", "aggregate": "min", "name": "min"},
                {"value": "result.duration", "aggregate": "max", "name": "max"},
                {"aggregate": "count"},
                {"value": "run.type", "aggregate": "union"}
            ],
            "where": {"and": [
                {"eq": {"result.ok": "T"}},
                {"eq": {"result.test": self.activedata_test_name}},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        data = self.activedata_query(query)
        print("\nTest durations for %s on %s between %s and %s" %
              (self.activedata_test_name, self.branches, self.start, self.end))
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            for record in data:
                platform = self.get_platform(record)
                if platform.startswith("-"):
                    continue
                print("%-40s %6.2f s (%.2f s - %.2f s over %d runs)" % (
                    platform, record['average'], record['min'],
                    record['max'], record['count']))
        else:
            print("No test durations found.")

    def report_test_tasks(self):
        # Report test tasks summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "limit": 1000,
            "select": ["build.platform", "build.type", "run.type", "run.name"],
            "where": {"and": [
                {"eq": {"result.test": self.activedata_test_name}},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        data = self.activedata_query(query)
        print("\nTest tasks for %s on %s between %s and %s" %
              (self.activedata_test_name, self.branches, self.start, self.end))
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            consolidated = {}
            for record in data:
                platform = self.get_platform(record)
                if platform not in consolidated:
                    consolidated[platform] = {}
                if record['run']['name'] in consolidated[platform]:
                    consolidated[platform][record['run']['name']] += 1
                else:
                    consolidated[platform][record['run']['name']] = 1
            for key in sorted(consolidated.keys()):
                tasks = ""
                for task in consolidated[key].keys():
                    if tasks:
                        tasks += "\n%-40s " % ""
                    tasks += task
                    tasks += " in %d runs" % consolidated[key][task]
                print("%-40s %s" % (key, tasks))
        else:
            print("No test tasks found.")

    def report_bugs(self):
        # Report open bugs matching the test name
        search = self.full_test_name
        if self.test_name:
            search = '%s,%s' % (search, self.test_name)
        if self.short_name:
            search = '%s,%s' % (search, self.short_name)
        payload = {'quicksearch': search,
                   'include_fields': 'id,summary'}
        response = requests.get('https://bugzilla.mozilla.org/rest/bug',
                                payload)
        response.raise_for_status()
        json_response = response.json()
        print("\nBugzilla quick search for '%s':" % search)
        if 'bugs' in json_response:
            for bug in json_response['bugs']:
                print("Bug %s: %s" % (bug['id'], bug['summary']))
        else:
            print("No bugs found.")

    def report(self, test_names, branches, start, end,
               show_info, show_results, show_durations, show_tasks, show_bugs):
        self.branches = branches
        self.start = start
        self.end = end
        self.show_info = show_info
        self.show_results = show_results
        self.show_durations = show_durations
        self.show_tasks = show_tasks

        if (not self.show_info and
                not self.show_results and
                not self.show_durations and
                not self.show_tasks and
                not show_bugs):
            # by default, show everything
            self.show_info = True
            self.show_results = True
            self.show_durations = True
            self.show_tasks = True
            show_bugs = True

        for test_name in test_names:
            print("===== %s =====" % test_name)
            self.test_name = test_name
            if len(self.test_name) < 6:
                print("'%s' is too short for a test name!" % self.test_name)
                continue
            self.set_test_name()
            if show_bugs:
                self.report_bugs()
            self.set_activedata_test_name()
            if self.show_results:
                self.report_test_results()
            if self.show_durations:
                self.report_test_durations()
            if self.show_tasks:
                self.report_test_tasks()


class TestInfoLongRunningTasks(TestInfo):
    """
    Support 'mach test-info long-tasks': Summary of tasks approaching their max-run-time.
    """
    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)

    def report(self, branches, start, end, threshold_pct, filter_threshold_pct):

        def get_long_running_ratio(record):
            count = record['count']
            tasks_gt_pct = record['tasks_gt_pct']
            return count / tasks_gt_pct

        # Search task durations in ActiveData for long-running tasks
        query = {
            "from": "task",
            "format": "list",
            "groupby": ["run.name"],
            "limit": 1000,
            "select": [
                {
                    "value": "task.maxRunTime",
                    "aggregate": "median",
                    "name": "max_run_time"
                },
                {
                    "aggregate": "count"
                },
                {
                    "value": {
                        "when": {
                            "gt": [
                                {
                                    "div": ["action.duration", "task.maxRunTime"]
                                }, threshold_pct / 100.0
                            ]
                        },
                        "then": 1
                    },
                    "aggregate": "sum",
                    "name": "tasks_gt_pct"
                }
            ],
            "where": {"and": [
                {"in": {"build.branch": branches.split(',')}},
                {"gt": {"task.run.start_time": {"date": start}}},
                {"lte": {"task.run.start_time": {"date": end}}},
                {"eq": {"task.state": "completed"}}
            ]}
        }
        data = self.activedata_query(query)
        print("\nTasks nearing their max-run-time on %s between %s and %s" %
              (branches, start, end))
        if data and len(data) > 0:
            filtered = []
            for record in data:
                if 'tasks_gt_pct' in record:
                    count = record['count']
                    tasks_gt_pct = record['tasks_gt_pct']
                    if float(tasks_gt_pct) / count > filter_threshold_pct / 100.0:
                        filtered.append(record)
            filtered.sort(key=get_long_running_ratio)
            if not filtered:
                print("No long running tasks found.")
            for record in filtered:
                name = record['run']['name']
                count = record['count']
                max_run_time = record['max_run_time']
                tasks_gt_pct = record['tasks_gt_pct']
                print("%-55s: %d of %d runs (%.1f%%) exceeded %d%% of max-run-time (%d s)" %
                      (name, tasks_gt_pct, count, tasks_gt_pct * 100.0 / count,
                       threshold_pct, max_run_time))
        else:
            print("No tasks found.")


class TestInfoReport(TestInfo):
    """
    Support 'mach test-info report': Report of test runs summarized by
    manifest and component.
    """
    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)
        self.total_activedata_matches = 0
        self.threads = []

    def add_activedata_for_suite(self, label, branches, days,
                                 suite_clause, tests_clause, path_mod):
        dates_clause = {"date": "today-%dday" % days}
        where_conditions = [
            suite_clause,
            {"in": {"repo.branch.name": branches.split(',')}},
            {"gt": {"run.timestamp": dates_clause}},
        ]
        if tests_clause:
            where_conditions.append(tests_clause)
        ad_query = {
            "from": "unittest",
            "limit": ACTIVEDATA_RECORD_LIMIT,
            "format": "list",
            "groupby": ["result.test"],
            "select": [
                {
                    "name": "result.count",
                    "aggregate": "count"
                },
                {
                    "name": "result.duration",
                    "value": "result.duration",
                    "aggregate": "sum"
                },
                {
                    "name": "result.failures",
                    "value": {"case": [
                        {"when": {"eq": {"result.ok": "F"}}, "then": 1}
                    ]},
                    "aggregate": "sum",
                    "default": 0
                },
                {
                    "name": "result.skips",
                    "value": {"case": [
                        {"when": {"eq": {"result.status": "SKIP"}}, "then": 1}
                    ]},
                    "aggregate": "sum",
                    "default": 0
                }
            ],
            "where": {"and": where_conditions}
        }
        t = ActiveDataThread(label, self, ad_query, path_mod)
        self.threads.append(t)

    def update_report(self, by_component, result, path_mod):
        def update_item(item, label, value):
            # It is important to include any existing item value in case ActiveData
            # returns multiple records for the same test; that can happen when the
            # report maps more than one ActiveData record to the same path.
            new_value = item.get(label, 0) + value
            if isinstance(new_value, int):
                item[label] = new_value
            else:
                item[label] = round(new_value, 2)

        if 'test' in result and 'tests' in by_component:
            test = result['test']
            if path_mod:
                test = path_mod(test)
            for bc in by_component['tests']:
                for item in by_component['tests'][bc]:
                    if test == item['test']:
                        seconds = round(result.get('duration', 0), 2)
                        update_item(item, 'total run time, seconds', seconds)
                        update_item(item, 'total runs', result.get('count', 0))
                        update_item(item, 'skipped runs', result.get('skips', 0))
                        update_item(item, 'failed runs', result.get('failures', 0))
                        return True
        return False
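
    # For illustration: if two ActiveData records end up mapped to the same test
    # path (for example after a path_mod function strips "?query" or "#fragment"
    # suffixes), update_item() accumulates their values, so 'total runs' of 3 and
    # 2 becomes 5, and float totals are re-rounded to two decimal places.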

    def collect_activedata_results(self, by_component):
        # Start the first MAX_ACTIVEDATA_CONCURRENCY threads. If too many
        # concurrent requests are made to ActiveData, the requests frequently
        # fail (504 is the typical response).
        for i in range(min(MAX_ACTIVEDATA_CONCURRENCY, len(self.threads))):
            t = self.threads[i]
            t.start()
        # Wait for running threads (the first N threads in self.threads) to complete.
        # When a thread completes, start the next thread, process the results
        # from the completed thread, and remove the completed thread from
        # the thread list.
        while len(self.threads):
            running_threads = min(MAX_ACTIVEDATA_CONCURRENCY, len(self.threads))
            for i in range(running_threads):
                t = self.threads[i]
                t.join(1)
                if not t.is_alive():
                    ad_response = t.response
                    path_mod = t.context
                    name = t.name
                    del self.threads[i]
                    if len(self.threads) >= MAX_ACTIVEDATA_CONCURRENCY:
                        running_threads = min(MAX_ACTIVEDATA_CONCURRENCY, len(self.threads))
                        self.threads[running_threads - 1].start()
                    if ad_response:
                        if len(ad_response) >= ACTIVEDATA_RECORD_LIMIT:
                            print("%s: ActiveData query limit reached; data may be missing" % name)
                        matches = 0
                        for record in ad_response:
                            if 'result' in record:
                                result = record['result']
                                if self.update_report(by_component, result, path_mod):
                                    matches += 1
                        self.log_verbose("%s: %d results; %d matches" %
                                         (name, len(ad_response), matches))
                        self.total_activedata_matches += matches
                    break

    def path_mod_reftest(self, path):
        # "<path1> == <path2>" -> "<path1>"
        path = path.split(' ')[0]
        # "<path>?<params>" -> "<path>"
        path = path.split('?')[0]
        # "<path>#<fragment>" -> "<path>"
        path = path.split('#')[0]
        return path

    def path_mod_jsreftest(self, path):
        # "<path>;assert" -> "<path>"
        path = path.split(';')[0]
        return path

    def path_mod_marionette(self, path):
        # "<path> <test-name>" -> "<path>"
        path = path.split(' ')[0]
        # "part1\part2" -> "part1/part2"
        path = path.replace('\\', os.path.sep)
        return path

    def path_mod_wpt(self, path):
        if path[0] == os.path.sep:
            # "/<path>" -> "<path>"
            path = path[1:]
        # "<path>" -> "testing/web-platform/tests/<path>"
        path = os.path.join('testing', 'web-platform', 'tests', path)
        # "<path>?<params>" -> "<path>"
        path = path.split('?')[0]
        return path

    def path_mod_jittest(self, path):
        # "part1\part2" -> "part1/part2"
        path = path.replace('\\', os.path.sep)
        # "<path>" -> "js/src/jit-test/tests/<path>"
        return os.path.join('js', 'src', 'jit-test', 'tests', path)

    def path_mod_xpcshell(self, path):
        # "<manifest>.ini:<path>" -> "<path>"
        path = path.split('.ini:')[-1]
        return path
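
    # For illustration (on a POSIX host, where os.path.sep is "/"):
    #   path_mod_reftest("layout/reftests/a.html == layout/reftests/a-ref.html")
    #       -> "layout/reftests/a.html"
    #   path_mod_wpt("/css/css-grid/grid-001.html?rtl")
    #       -> "testing/web-platform/tests/css/css-grid/grid-001.html"
    #   path_mod_xpcshell("netwerk/test/unit/xpcshell.ini:test_cache.js")
    #       -> "test_cache.js"
    # The example paths above are only illustrative.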

    def add_activedata(self, branches, days, by_component):
        suites = {
            # List of known suites requiring special path handling and/or
            # suites typically containing thousands of test paths.
            # The regexes have been selected by trial and error to partition data
            # into queries returning fewer than ACTIVEDATA_RECORD_LIMIT records.
            "reftest": (self.path_mod_reftest,
                        [{"regex": {"result.test": "layout/reftests/[a-k].*"}},
                         {"regex": {"result.test": "layout/reftests/[^a-k].*"}},
                         {"not": {"regex": {"result.test": "layout/reftests/.*"}}}]),
            "web-platform-tests": (self.path_mod_wpt,
                                   [{"regex": {"result.test": "/[a-g].*"}},
                                    {"regex": {"result.test": "/[h-p].*"}},
                                    {"not": {"regex": {"result.test": "/[a-p].*"}}}]),
            "web-platform-tests-reftest": (self.path_mod_wpt,
                                           [{"regex": {"result.test": "/css/css-.*"}},
                                            {"not": {"regex": {"result.test": "/css/css-.*"}}}]),
            "crashtest": (None,
                          [{"regex": {"result.test": "[a-g].*"}},
                           {"not": {"regex": {"result.test": "[a-g].*"}}}]),
            "web-platform-tests-wdspec": (self.path_mod_wpt, [None]),
            "web-platform-tests-crashtest": (self.path_mod_wpt, [None]),
            "xpcshell": (self.path_mod_xpcshell, [None]),
            "mochitest-plain": (None, [None]),
            "mochitest-browser-chrome": (None, [None]),
            "mochitest-media": (None, [None]),
            "mochitest-devtools-chrome": (None, [None]),
            "marionette": (self.path_mod_marionette, [None]),
            "mochitest-chrome": (None, [None]),
        }
        unsupported_suites = [
            # These suites are excluded because the test resolver does not
            # currently provide test paths for them.
            "jsreftest",
            "jittest",
            "geckoview-junit",
            "cppunittest",
        ]
        for suite in suites:
            suite_clause = {"eq": {"run.suite.name": suite}}
            path_mod = suites[suite][0]
            test_clauses = suites[suite][1]
            suite_count = 1
            for test_clause in test_clauses:
                label = "%s-%d" % (suite, suite_count)
                suite_count += 1
                self.add_activedata_for_suite(label, branches, days,
                                              suite_clause, test_clause, path_mod)
        # Remainder: all supported suites not handled above.
        suite_clause = {"not": {"in": {"run.suite.name": unsupported_suites + list(suites)}}}
        self.add_activedata_for_suite("remainder", branches, days,
                                      suite_clause, None, None)
        self.collect_activedata_results(by_component)

    def description(self, components, flavor, subsuite, paths,
                    show_manifests, show_tests, show_summary, show_annotations,
                    show_activedata,
                    filter_values, filter_keys,
                    branches, days):
        # provide a natural-language description of the report options
        what = []
        if show_manifests:
            what.append("test manifests")
        if show_tests:
            what.append("tests")
        if show_annotations:
            what.append("test manifest annotations")
        if show_summary and len(what) == 0:
            what.append("summary of tests only")
        if len(what) > 1:
            what[-1] = "and " + what[-1]
        what = ", ".join(what)
        d = "Test summary report for " + what
        if components:
            d += ", in specified components (%s)" % components
        else:
            d += ", in all components"
        if flavor:
            d += ", in specified flavor (%s)" % flavor
        if subsuite:
            d += ", in specified subsuite (%s)" % subsuite
        if paths:
            d += ", on specified paths (%s)" % paths
        if filter_values:
            d += ", containing '%s'" % filter_values
            if filter_keys:
                d += " in manifest keys '%s'" % filter_keys
            else:
                d += " in any part of manifest entry"
        if show_activedata:
            d += ", including historical run-time data for the last %d days on %s" % (
                days, branches)
        d += " as of %s." % datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
        return d

    def report(self, components, flavor, subsuite, paths,
               show_manifests, show_tests, show_summary, show_annotations,
               show_activedata,
               filter_values, filter_keys, show_components, output_file,
               branches, days):

        def matches_filters(test):
            """
            Return True if all of the requested filter_values are found in this
            test; if filter_keys are specified, restrict the search to those
            test keys.
            """
            for value in filter_values:
                value_found = False
                for key in test:
                    if not filter_keys or key in filter_keys:
                        if re.search(value, test[key]):
                            value_found = True
                            break
                if not value_found:
                    return False
            return True

        start_time = datetime.datetime.now()

        # Ensure a useful report by default
        if not show_manifests and not show_tests and not show_summary and not show_annotations:
            show_manifests = True
            show_summary = True

        by_component = {}
        if components:
            components = components.split(',')
        if filter_keys:
            filter_keys = filter_keys.split(',')
        if filter_values:
            filter_values = filter_values.split(',')
        else:
            filter_values = []
        display_keys = (filter_keys or []) + ['skip-if', 'fail-if', 'fails-if']
        display_keys = set(display_keys)

        print("Finding tests...")
        here = os.path.abspath(os.path.dirname(__file__))
        resolver = TestResolver.from_environment(cwd=here, loader_cls=TestManifestLoader)
        tests = list(resolver.resolve_tests(paths=paths, flavor=flavor,
                                            subsuite=subsuite))

        manifest_paths = set()
        for t in tests:
            if 'manifest' in t and t['manifest'] is not None:
                manifest_paths.add(t['manifest'])
        manifest_count = len(manifest_paths)
        print("Resolver found {} tests, {} manifests".format(len(tests), manifest_count))

        if show_manifests:
            topsrcdir = self.build_obj.topsrcdir
            by_component['manifests'] = {}
            manifest_paths = list(manifest_paths)
            manifest_paths.sort()
            relpaths = []
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode='empty')
            files_info = reader.files_info(relpaths)
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                manifest_info = None
                if relpath in files_info:
                    bug_component = files_info[relpath].get('BUG_COMPONENT')
                    if bug_component:
                        key = "{}::{}".format(bug_component.product, bug_component.component)
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        manifest_info = {
                            'manifest': relpath,
                            'tests': 0,
                            'skipped': 0
                        }
                        rkey = key if show_components else 'all'
                        if rkey in by_component['manifests']:
                            by_component['manifests'][rkey].append(manifest_info)
                        else:
                            by_component['manifests'][rkey] = [manifest_info]
                if manifest_info:
                    for t in tests:
                        if t['manifest'] == manifest_path:
                            manifest_info['tests'] += 1
                            if t.get('skip-if'):
                                manifest_info['skipped'] += 1
            for key in by_component['manifests']:
                # entries are dicts, so sort on the manifest path
                by_component['manifests'][key].sort(key=lambda k: k['manifest'])

        if show_tests:
            by_component['tests'] = {}

        if show_tests or show_summary or show_annotations:
            test_count = 0
            failed_count = 0
            skipped_count = 0
            annotation_count = 0
            condition_count = 0
            component_set = set()
            relpaths = []
            conditions = {}
            known_unconditional_annotations = ['skip', 'fail', 'asserts', 'random']
            known_conditional_annotations = ['skip-if', 'fail-if', 'run-if',
                                             'fails-if', 'fuzzy-if', 'random-if', 'asserts-if']
            for t in tests:
                relpath = t.get('srcdir_relpath')
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode='empty')
            files_info = reader.files_info(relpaths)
            for t in tests:
                if not matches_filters(t):
                    continue
                if 'referenced-test' in t:
                    # Avoid double-counting reftests: disregard reference file entries.
                    continue
                if show_annotations:
                    for key in t:
                        if key in known_unconditional_annotations:
                            annotation_count += 1
                        if key in known_conditional_annotations:
                            annotation_count += 1
                            # Here 'key' is a manifest annotation type like 'skip-if' and t[key]
                            # is the associated condition. For example, the manifestparser
                            # manifest annotation, "skip-if = os == 'win'", is expected to be
                            # encoded as t['skip-if'] = "os == 'win'".
                            # To allow for reftest manifests, t[key] may have multiple entries
                            # separated by ';', each corresponding to a condition for that test
                            # and annotation type. For example,
                            # "skip-if(Android&&webrender) skip-if(OSX)" would be
                            # encoded as t['skip-if'] = "Android&&webrender;OSX".
                            annotation_conditions = t[key].split(';')
                            for condition in annotation_conditions:
                                condition_count += 1
                                # Trim reftest fuzzy-if ranges: everything after the first comma,
                                # e.g. "Android,0-2,1-3" -> "Android"
                                condition = condition.split(',')[0]
                                if condition not in conditions:
                                    conditions[condition] = 0
                                conditions[condition] += 1
                test_count += 1
                relpath = t.get('srcdir_relpath')
                if relpath in files_info:
                    bug_component = files_info[relpath].get('BUG_COMPONENT')
                    if bug_component:
                        key = "{}::{}".format(bug_component.product, bug_component.component)
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        component_set.add(key)
                        test_info = {'test': relpath}
                        for test_key in display_keys:
                            value = t.get(test_key)
                            if value:
                                test_info[test_key] = value
                        if t.get('fail-if'):
                            failed_count += 1
                        if t.get('fails-if'):
                            failed_count += 1
                        if t.get('skip-if'):
                            skipped_count += 1
                        if show_tests:
                            rkey = key if show_components else 'all'
                            if rkey in by_component['tests']:
                                # Avoid duplicates: some test paths have multiple TestResolver
                                # entries, as when a test is included by multiple manifests.
                                found = False
                                for ctest in by_component['tests'][rkey]:
                                    if ctest['test'] == test_info['test']:
                                        found = True
                                        break
                                if not found:
                                    by_component['tests'][rkey].append(test_info)
                            else:
                                by_component['tests'][rkey] = [test_info]
            if show_tests:
                for key in by_component['tests']:
                    by_component['tests'][key].sort(key=lambda k: k['test'])

        if show_activedata:
            try:
                self.add_activedata(branches, days, by_component)
            except Exception:
                print("Failed to retrieve some ActiveData data.")
                traceback.print_exc()
            self.log_verbose("%d tests updated with matching ActiveData data" %
                             self.total_activedata_matches)
            self.log_verbose("%d seconds waiting for ActiveData" %
                             self.total_activedata_seconds)

        by_component['description'] = self.description(
            components, flavor, subsuite, paths,
            show_manifests, show_tests, show_summary, show_annotations,
            show_activedata,
            filter_values, filter_keys,
            branches, days)

        if show_summary:
            by_component['summary'] = {}
            by_component['summary']['components'] = len(component_set)
            by_component['summary']['manifests'] = manifest_count
            by_component['summary']['tests'] = test_count
            by_component['summary']['failed tests'] = failed_count
            by_component['summary']['skipped tests'] = skipped_count

        if show_annotations:
            by_component['annotations'] = {}
            by_component['annotations']['total annotations'] = annotation_count
            by_component['annotations']['total conditions'] = condition_count
            by_component['annotations']['unique conditions'] = len(conditions)
            by_component['annotations']['conditions'] = conditions

        self.write_report(by_component, output_file)

        end_time = datetime.datetime.now()
        self.log_verbose("%d seconds total to generate report" %
                         (end_time - start_time).total_seconds())

    def write_report(self, by_component, output_file):
        json_report = json.dumps(by_component, indent=2, sort_keys=True)
        if output_file:
            output_file = os.path.abspath(output_file)
            output_dir = os.path.dirname(output_file)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)
            with open(output_file, 'w') as f:
                f.write(json_report)
        else:
            print(json_report)

    def report_diff(self, before, after, output_file):
        """
        Support for 'mach test-info report-diff'.
        """

        def get_file(path_or_url):
            if urlparse.urlparse(path_or_url).scheme:
                response = requests.get(path_or_url)
                response.raise_for_status()
                return json.loads(response.text)
            with open(path_or_url) as f:
                return json.load(f)

        report1 = get_file(before)
        report2 = get_file(after)

        by_component = {'tests': {}, 'summary': {}}
        self.diff_summaries(by_component, report1["summary"], report2["summary"])
        self.diff_all_components(by_component, report1["tests"], report2["tests"])
        self.write_report(by_component, output_file)

    def diff_summaries(self, by_component, summary1, summary2):
        """
        Update by_component with a comparison of the two summaries.
        """
        all_keys = set(summary1.keys()) | set(summary2.keys())
        for key in all_keys:
            delta = summary2.get(key, 0) - summary1.get(key, 0)
            by_component['summary']['%s delta' % key] = delta

    def diff_all_components(self, by_component, tests1, tests2):
        """
        Update by_component with any added/deleted tests, for all components.
        """
        self.added_count = 0
        self.deleted_count = 0
        for component in tests1:
            component1 = tests1[component]
            component2 = [] if component not in tests2 else tests2[component]
            self.diff_component(by_component, component, component1, component2)
        for component in tests2:
            if component not in tests1:
                component2 = tests2[component]
                self.diff_component(by_component, component, [], component2)
        by_component['summary']['added tests'] = self.added_count
        by_component['summary']['deleted tests'] = self.deleted_count

    def diff_component(self, by_component, component, component1, component2):
        """
        Update by_component[component] with any added/deleted tests for the
        named component.
        "added": tests found in component2 but missing from component1.
        "deleted": tests found in component1 but missing from component2.
        """
        tests1 = set([t['test'] for t in component1])
        tests2 = set([t['test'] for t in component2])
        deleted = tests1 - tests2
        added = tests2 - tests1
        if deleted or added:
            by_component['tests'][component] = {}
            if deleted:
                by_component['tests'][component]['deleted'] = sorted(list(deleted))
            if added:
                by_component['tests'][component]['added'] = sorted(list(added))
        self.added_count += len(added)
        self.deleted_count += len(deleted)
        common = len(tests1.intersection(tests2))
        self.log_verbose("%s: %d deleted, %d added, %d common" % (component, len(deleted),
                                                                  len(added), common))
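
    # For illustration: if a test appears only in the 'after' report for the
    # component "Core::DOM", the diff written by report_diff() would contain
    #   by_component['tests']['Core::DOM']['added'] == ['dom/tests/new_test.html']
    #   by_component['summary']['added tests'] == 1
    # along with a "<key> delta" entry for each summary key; the component and
    # path names above are only illustrative.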