# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import absolute_import, print_function

import datetime
import errno
import json
import os
import posixpath
import re
import subprocess
import threading
import traceback

import requests
import six.moves.urllib_parse as urlparse

import mozpack.path as mozpath
from moztest.resolve import TestResolver, TestManifestLoader
from mozfile import which

from mozbuild.base import MozbuildObject, MachCommandConditions as conditions

ACTIVEDATA_RECORD_LIMIT = 10000
MAX_ACTIVEDATA_CONCURRENCY = 5
MAX_ACTIVEDATA_RETRIES = 5
REFERER = 'https://wiki.developer.mozilla.org/en-US/docs/Mozilla/Test-Info'

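# ACTIVEDATA_RECORD_LIMIT caps the number of records requested per ActiveData
# query, MAX_ACTIVEDATA_CONCURRENCY caps the number of in-flight queries, and
# MAX_ACTIVEDATA_RETRIES caps retry attempts; REFERER identifies this tool in
# outgoing requests.
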
class TestInfo(object):
    """
    Support 'mach test-info'.
    """
    def __init__(self, verbose):
        self.verbose = verbose
        here = os.path.abspath(os.path.dirname(__file__))
        self.build_obj = MozbuildObject.from_environment(cwd=here)
        self.total_activedata_seconds = 0

    def log_verbose(self, what):
        if self.verbose:
            print(what)

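    # activedata_query posts a JSON query to the public ActiveData endpoint and
    # returns the "data" portion of the response; elapsed time is accumulated in
    # total_activedata_seconds so reports can log how long was spent waiting.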
    def activedata_query(self, query):
        start_time = datetime.datetime.now()
        self.log_verbose(start_time)
        self.log_verbose(json.dumps(query))
        response = requests.post("http://activedata.allizom.org/query",
                                 data=json.dumps(query),
                                 headers={'referer': REFERER})
        end_time = datetime.datetime.now()
        self.total_activedata_seconds += (end_time - start_time).total_seconds()
        self.log_verbose(end_time)
        self.log_verbose(response)
        response.raise_for_status()
        data = response.json()["data"]
        self.log_verbose("response length: %d" % len(data))
        return data


class ActiveDataThread(threading.Thread):
    """
    A thread to query ActiveData and wait for its response.
    """
    def __init__(self, name, ti, query, context):
        threading.Thread.__init__(self, name=name)
        self.ti = ti
        self.query = query
        self.context = context
        self.response = None

    def run(self):
        # Retry the query a few times; ActiveData requests fail intermittently.
        attempt = 1
        while attempt < MAX_ACTIVEDATA_RETRIES and not self.response:
            try:
                self.response = self.ti.activedata_query(self.query)
                if not self.response:
                    self.ti.log_verbose("%s: no data received for query" % self.name)
            except Exception:
                self.ti.log_verbose("%s: Exception on attempt #%d:" % (self.name, attempt))
                traceback.print_exc()
            attempt += 1
        if not self.response:
            # Use an empty list so the collector can treat this thread as
            # complete even when no data was ever received.
            self.response = []


class TestInfoTests(TestInfo):
    """
    Support 'mach test-info tests': Detailed report of specified tests.
    """
    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)

        self._hg = None
        if conditions.is_hg(self.build_obj):
            self._hg = which('hg')
            if not self._hg:
                raise OSError(errno.ENOENT, "Could not find 'hg' on PATH.")

        self._git = None
        if conditions.is_git(self.build_obj):
            self._git = which('git')
            if not self._git:
                raise OSError(errno.ENOENT, "Could not find 'git' on PATH.")

    def find_in_hg_or_git(self, test_name):
        if self._hg:
            cmd = [self._hg, 'files', '-I', test_name]
        elif self._git:
            cmd = [self._git, 'ls-files', test_name]
        else:
            return None
        try:
            out = subprocess.check_output(cmd, universal_newlines=True).splitlines()
        except subprocess.CalledProcessError:
            out = None
        return out

    def set_test_name(self):
        # Generating a unified report for a specific test is complicated
        # by differences in the test name used in various data sources.
        # Consider:
        # - It is often convenient to request a report based only on
        #   a short file name, rather than the full path;
        # - Bugs may be filed in bugzilla against a simple, short test
        #   name or the full path to the test;
        # - In ActiveData, the full path is usually used, but sometimes
        #   also includes additional path components outside of the
        #   mercurial repo (common for reftests).
        # This function attempts to find appropriate names for different
        # queries based on the specified test name.

        # full_test_name is the full path to the file in hg (or git)
        self.full_test_name = None
        out = self.find_in_hg_or_git(self.test_name)
        if out and len(out) == 1:
            self.full_test_name = out[0]
        elif out and len(out) > 1:
            print("Ambiguous test name specified. Found:")
            for line in out:
                print("    %s" % line)
        else:
            # Not found as given; try again as a glob on the file name.
            out = self.find_in_hg_or_git('**/%s*' % self.test_name)
            if out and len(out) == 1:
                self.full_test_name = out[0]
            elif out and len(out) > 1:
                print("Ambiguous test name. Found:")
                for line in out:
                    print("    %s" % line)
        if self.full_test_name:
            self.full_test_name = self.full_test_name.replace(os.sep, posixpath.sep)
            print("Found %s in source control." % self.full_test_name)
        else:
            print("Unable to validate test name '%s'!" % self.test_name)
            self.full_test_name = self.test_name

        # search for full_test_name in test manifests
        here = os.path.abspath(os.path.dirname(__file__))
        resolver = TestResolver.from_environment(cwd=here, loader_cls=TestManifestLoader)
        relpath = self.build_obj._wrap_path_argument(self.full_test_name).relpath()
        tests = list(resolver.resolve_tests(paths=[relpath]))
        if len(tests) == 1:
            relpath = self.build_obj._wrap_path_argument(tests[0]['manifest']).relpath()
            print("%s found in manifest %s" % (self.full_test_name, relpath))
            if tests[0].get('flavor'):
                print("  flavor: %s" % tests[0]['flavor'])
            if tests[0].get('skip-if'):
                print("  skip-if: %s" % tests[0]['skip-if'])
            if tests[0].get('fail-if'):
                print("  fail-if: %s" % tests[0]['fail-if'])
        elif len(tests) == 0:
            print("%s not found in any test manifest!" % self.full_test_name)
        else:
            print("%s found in more than one manifest!" % self.full_test_name)

        # short_name is full_test_name without the path
        self.short_name = None
        name_idx = self.full_test_name.rfind('/')
        if name_idx > 0:
            self.short_name = self.full_test_name[name_idx + 1:]
        if self.short_name and self.short_name == self.test_name:
            self.short_name = None

        if not (self.show_results or self.show_durations or self.show_tasks):
            # no need to determine ActiveData name if not querying
            return

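    # set_activedata_test_name looks up the test name as recorded in ActiveData,
    # matching on the exact names collected above and on regular-expression
    # variants, and falls back to the requested name when no match is found.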
    def set_activedata_test_name(self):
        # activedata_test_name is the test name as it appears in ActiveData
        self.activedata_test_name = None
        simple_names = [
            self.full_test_name,
            self.test_name,
            self.short_name,
        ]
        simple_names = [x for x in simple_names if x]
        searches = [
            {"in": {"result.test": simple_names}},
        ]
        regex_names = [".*%s.*" % re.escape(x) for x in simple_names if x]
        for r in regex_names:
            searches.append({"regexp": {"result.test": r}})
        query = {
            "from": "unittest",
            "format": "list",
            "groupby": ["result.test"],
            "where": {"and": [
                {"or": searches},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        print("Querying ActiveData...")  # Following query can take a long time
        data = self.activedata_query(query)
        if data and len(data) > 0:
            self.activedata_test_name = [
                d['result']['test']
                for p in simple_names + regex_names
                for d in data
                if re.match(p + "$", d['result']['test'])
            ][0]  # first match is best match
        if self.activedata_test_name:
            print("Found records matching '%s' in ActiveData." %
                  self.activedata_test_name)
        else:
            print("Unable to find matching records in ActiveData; using %s!" %
                  self.test_name)
            self.activedata_test_name = self.test_name

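    # get_platform builds a "<platform>/<types>:" label for an ActiveData record,
    # combining the build platform with de-duplicated run/build types; it is used
    # both as a sort key and as the row label in the printed reports.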
    def get_platform(self, record):
        if 'platform' in record['build']:
            platform = record['build']['platform']
        else:
            platform = "-"
        platform_words = platform.split('-')
        types_label = ""
        # combine run and build types and eliminate duplicates
        run_types = []
        if 'run' in record and 'type' in record['run']:
            run_types = record['run']['type']
            run_types = run_types if isinstance(run_types, list) else [run_types]
        build_types = []
        if 'build' in record and 'type' in record['build']:
            build_types = record['build']['type']
            build_types = build_types if isinstance(build_types, list) else [build_types]
        run_types = list(set(run_types + build_types))
        # '1proc' is used as a treeherder label but does not appear in run types
        if 'e10s' not in run_types:
            run_types = run_types + ['1proc']
        for run_type in run_types:
            # chunked is not interesting
            if run_type == 'chunked':
                continue
            # e10s is the default: implied
            if run_type == 'e10s':
                continue
            # sometimes a build/run type is already present in the build platform
            if run_type in platform_words:
                continue
            if types_label:
                types_label += "-"
            types_label += run_type
        return "%s/%s:" % (platform, types_label)

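    # The report_* methods below each build one ActiveData query over the
    # configured branches and date range and print a human-readable summary:
    # pass/fail counts per platform, duration statistics per platform, and the
    # tasks in which the test ran.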
    def report_test_results(self):
        # Report test pass/fail summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "groupby": ["build.platform", "build.type"],
            "select": [
                {"aggregate": "count"},
                {
                    "name": "failures",
                    "value": {"case": [
                        {"when": {"eq": {"result.ok": "F"}}, "then": 1}
                    ]},
                    "aggregate": "sum",
                    "default": 0
                },
                {
                    "name": "skips",
                    "value": {"case": [
                        {"when": {"eq": {"result.status": "SKIP"}}, "then": 1}
                    ]},
                    "aggregate": "sum",
                    "default": 0
                },
                {"value": "run.type", "aggregate": "union"}
            ],
            "where": {"and": [
                {"eq": {"result.test": self.activedata_test_name}},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        print("\nTest results for %s on %s between %s and %s" %
              (self.activedata_test_name, self.branches, self.start, self.end))
        data = self.activedata_query(query)
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            worst_rate = 0.0
            worst_platform = None
            worst_failures = 0
            worst_runs = 0
            total_runs = 0
            total_failures = 0
            for record in data:
                platform = self.get_platform(record)
                if platform.startswith("-"):
                    continue
                runs = record['count']
                total_runs = total_runs + runs
                failures = record.get('failures', 0)
                skips = record.get('skips', 0)
                total_failures = total_failures + failures
                rate = float(failures) / runs
                if rate >= worst_rate:
                    worst_rate = rate
                    worst_platform = platform
                    worst_failures = failures
                    worst_runs = runs
                print("%-40s %6d failures (%6d skipped) in %6d runs" % (
                    platform, failures, skips, runs))
            print("\nTotal: %d failures in %d runs or %.3f failures/run" %
                  (total_failures, total_runs, float(total_failures) / total_runs))
            if worst_failures > 0:
                print("Worst rate on %s %d failures in %d runs or %.3f failures/run" %
                      (worst_platform, worst_failures, worst_runs, worst_rate))
        else:
            print("No test result data found.")

    def report_test_durations(self):
        # Report test durations summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "groupby": ["build.platform", "build.type"],
            "select": [
                {"value": "result.duration",
                 "aggregate": "average", "name": "average"},
                {"value": "result.duration", "aggregate": "min", "name": "min"},
                {"value": "result.duration", "aggregate": "max", "name": "max"},
                {"aggregate": "count"},
                {"value": "run.type", "aggregate": "union"}
            ],
            "where": {"and": [
                {"eq": {"result.ok": "T"}},
                {"eq": {"result.test": self.activedata_test_name}},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        data = self.activedata_query(query)
        print("\nTest durations for %s on %s between %s and %s" %
              (self.activedata_test_name, self.branches, self.start, self.end))
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            for record in data:
                platform = self.get_platform(record)
                if platform.startswith("-"):
                    continue
                print("%-40s %6.2f s (%.2f s - %.2f s over %d runs)" % (
                    platform, record['average'], record['min'],
                    record['max'], record['count']))
        else:
            print("No test durations found.")

    def report_test_tasks(self):
        # Report test tasks summary from ActiveData
        query = {
            "from": "unittest",
            "format": "list",
            "select": ["build.platform", "build.type", "run.type", "run.name"],
            "where": {"and": [
                {"eq": {"result.test": self.activedata_test_name}},
                {"in": {"build.branch": self.branches.split(',')}},
                {"gt": {"run.timestamp": {"date": self.start}}},
                {"lt": {"run.timestamp": {"date": self.end}}}
            ]}
        }
        data = self.activedata_query(query)
        print("\nTest tasks for %s on %s between %s and %s" %
              (self.activedata_test_name, self.branches, self.start, self.end))
        if data and len(data) > 0:
            data.sort(key=self.get_platform)
            consolidated = {}
            for record in data:
                platform = self.get_platform(record)
                if platform not in consolidated:
                    consolidated[platform] = {}
                if record['run']['name'] in consolidated[platform]:
                    consolidated[platform][record['run']['name']] += 1
                else:
                    consolidated[platform][record['run']['name']] = 1
            for key in sorted(consolidated.keys()):
                tasks = ""
                for task in consolidated[key].keys():
                    if tasks:
                        tasks += "\n%-40s " % ""
                    tasks += task
                    tasks += " in %d runs" % consolidated[key][task]
                print("%-40s %s" % (key, tasks))
        else:
            print("No test tasks found.")

    def report_bugs(self):
        # Report open bugs matching test name
        search = self.full_test_name
        if self.test_name:
            search = '%s,%s' % (search, self.test_name)
        if self.short_name:
            search = '%s,%s' % (search, self.short_name)
        payload = {'quicksearch': search,
                   'include_fields': 'id,summary'}
        response = requests.get('https://bugzilla.mozilla.org/rest/bug',
                                params=payload,
                                headers={'referer': REFERER})
        response.raise_for_status()
        json_response = response.json()
        print("\nBugzilla quick search for '%s':" % search)
        if 'bugs' in json_response:
            for bug in json_response['bugs']:
                print("Bug %s: %s" % (bug['id'], bug['summary']))
        else:
            print("No bugs found.")

    def report(self, test_names, branches, start, end,
               show_info, show_results, show_durations, show_tasks, show_bugs):
        self.branches = branches
        self.start = start
        self.end = end
        self.show_info = show_info
        self.show_results = show_results
        self.show_durations = show_durations
        self.show_tasks = show_tasks

        if (not self.show_info and
                not self.show_results and
                not self.show_durations and
                not self.show_tasks and
                not show_bugs):
            # by default, show everything
            self.show_info = True
            self.show_results = True
            self.show_durations = True
            self.show_tasks = True
            show_bugs = True

        for test_name in test_names:
            print("===== %s =====" % test_name)
            self.test_name = test_name
            if len(self.test_name) < 6:
                print("'%s' is too short for a test name!" % self.test_name)
                continue
            self.set_test_name()
            if show_bugs:
                self.report_bugs()
            self.set_activedata_test_name()
            if self.show_results:
                self.report_test_results()
            if self.show_durations:
                self.report_test_durations()
            if self.show_tasks:
                self.report_test_tasks()


class TestInfoLongRunningTasks(TestInfo):
    """
    Support 'mach test-info long-tasks': Summary of tasks approaching their max-run-time.
    """
    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)

    def report(self, branches, start, end, threshold_pct, filter_threshold_pct):

        def get_long_running_ratio(record):
            count = record['count']
            tasks_gt_pct = record['tasks_gt_pct']
            return count / tasks_gt_pct

        # Search task durations in ActiveData for long-running tasks
        query = {
            "from": "task",
            "format": "list",
            "groupby": ["run.name"],
            "select": [
                {
                    "value": "task.maxRunTime",
                    "aggregate": "median",
                    "name": "max_run_time"
                },
                {"aggregate": "count"},
                {
                    # number of runs that exceeded threshold_pct of max-run-time
                    "value": {
                        "when": {
                            "gt": [
                                {"div": ["action.duration", "task.maxRunTime"]},
                                threshold_pct / 100.0
                            ]
                        },
                        "then": 1
                    },
                    "aggregate": "sum",
                    "name": "tasks_gt_pct"
                }
            ],
            "where": {"and": [
                {"in": {"build.branch": branches.split(',')}},
                {"gt": {"task.run.start_time": {"date": start}}},
                {"lte": {"task.run.start_time": {"date": end}}},
                {"eq": {"task.state": "completed"}},
            ]}
        }
        data = self.activedata_query(query)
        print("\nTasks nearing their max-run-time on %s between %s and %s" %
              (branches, start, end))
        if data and len(data) > 0:
            filtered = []
            for record in data:
                if 'tasks_gt_pct' in record:
                    count = record['count']
                    tasks_gt_pct = record['tasks_gt_pct']
                    if float(tasks_gt_pct) / count > filter_threshold_pct / 100.0:
                        filtered.append(record)
            filtered.sort(key=get_long_running_ratio)
            if not filtered:
                print("No long running tasks found.")
            for record in filtered:
                name = record['run']['name']
                count = record['count']
                max_run_time = record['max_run_time']
                tasks_gt_pct = record['tasks_gt_pct']
                print("%-55s: %d of %d runs (%.1f%%) exceeded %d%% of max-run-time (%d s)" %
                      (name, tasks_gt_pct, count, tasks_gt_pct * 100 / count,
                       threshold_pct, max_run_time))
        else:
            print("No tasks found.")


class TestInfoReport(TestInfo):
    """
    Support 'mach test-info report': Report of test runs summarized by
    manifest and component.
    """
    def __init__(self, verbose):
        TestInfo.__init__(self, verbose)
        self.total_activedata_matches = 0
        self.threads = []

    def add_activedata_for_suite(self, label, branches, days,
                                 suite_clause, tests_clause, path_mod):
        dates_clause = {"date": "today-%dday" % days}
        where_conditions = [
            suite_clause,
            {"in": {"repo.branch.name": branches.split(',')}},
            {"gt": {"run.timestamp": dates_clause}},
        ]
        if tests_clause:
            where_conditions.append(tests_clause)
        ad_query = {
            "from": "unittest",
            "limit": ACTIVEDATA_RECORD_LIMIT,
            "format": "list",
            "groupby": ["result.test"],
            "select": [
                {
                    "name": "result.count",
                    "aggregate": "count"
                },
                {
                    "name": "result.duration",
                    "value": "result.duration",
                    "aggregate": "sum"
                },
                {
                    "name": "result.failures",
                    "value": {"case": [
                        {"when": {"eq": {"result.ok": "F"}}, "then": 1}
                    ]},
                    "aggregate": "sum",
                    "default": 0
                },
                {
                    "name": "result.skips",
                    "value": {"case": [
                        {"when": {"eq": {"result.status": "SKIP"}}, "then": 1}
                    ]},
                    "aggregate": "sum",
                    "default": 0
                }
            ],
            "where": {"and": where_conditions}
        }
        t = ActiveDataThread(label, self, ad_query, path_mod)
        self.threads.append(t)

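    # add_activedata_for_suite only queues an ActiveDataThread; the queued
    # threads are started with bounded concurrency by collect_activedata_results,
    # which folds their responses into the report via update_report.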
    def update_report(self, by_component, result, path_mod):
        def update_item(item, label, value):
            # It is important to include any existing item value in case ActiveData
            # returns multiple records for the same test; that can happen if the report
            # sometimes maps more than one ActiveData record to the same path.
            new_value = item.get(label, 0) + value
            if type(new_value) == int:
                item[label] = new_value
            else:
                item[label] = round(new_value, 2)

        if 'test' in result and 'tests' in by_component:
            test = result['test']
            if path_mod:
                test = path_mod(test)
            for bc in by_component['tests']:
                for item in by_component['tests'][bc]:
                    if test == item['test']:
                        seconds = round(result.get('duration', 0), 2)
                        update_item(item, 'total run time, seconds', seconds)
                        update_item(item, 'total runs', result.get('count', 0))
                        update_item(item, 'skipped runs', result.get('skips', 0))
                        update_item(item, 'failed runs', result.get('failures', 0))
                        return True
        return False

    def collect_activedata_results(self, by_component):
        # Start the first MAX_ACTIVEDATA_CONCURRENCY threads. If too many
        # concurrent requests are made to ActiveData, the requests frequently
        # fail (504 is the typical response).
        for i in range(min(MAX_ACTIVEDATA_CONCURRENCY, len(self.threads))):
            self.threads[i].start()
        # Wait for running threads (first N threads in self.threads) to complete.
        # When a thread completes, start the next thread, process the results
        # from the completed thread, and remove the completed thread from
        # self.threads.
        while len(self.threads):
            running_threads = min(MAX_ACTIVEDATA_CONCURRENCY, len(self.threads))
            for i in range(running_threads):
                t = self.threads[i]
                t.join(1)
                if t.is_alive():
                    continue
                name = t.name
                ad_response = t.response
                path_mod = t.context
                del self.threads[i]
                if len(self.threads) >= MAX_ACTIVEDATA_CONCURRENCY:
                    running_threads = min(MAX_ACTIVEDATA_CONCURRENCY, len(self.threads))
                    self.threads[running_threads - 1].start()
                if len(ad_response) >= ACTIVEDATA_RECORD_LIMIT:
                    print("%s: ActiveData query limit reached; data may be missing" % name)
                matches = 0
                for record in ad_response:
                    if 'result' in record:
                        result = record['result']
                        if self.update_report(by_component, result, path_mod):
                            matches += 1
                self.log_verbose("%s: %d results; %d matches" %
                                 (name, len(ad_response), matches))
                self.total_activedata_matches += matches
                # self.threads was modified; restart the scan of running threads.
                break

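    # The path_mod_* helpers below normalize test names as reported by ActiveData
    # into source-tree-relative paths, so that update_report can match ActiveData
    # records against the paths produced by the test resolver.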
    def path_mod_reftest(self, path):
        # "<path1> == <path2>" -> "<path1>"
        path = path.split(' ')[0]
        # "<path>?<params>" -> "<path>"
        path = path.split('?')[0]
        # "<path>#<fragment>" -> "<path>"
        path = path.split('#')[0]
        return path

    def path_mod_jsreftest(self, path):
        # "<path>;assert" -> "<path>"
        path = path.split(';')[0]
        # "<path>" -> "js/src/tests/<path>"
        return os.path.join('js', 'src', 'tests', path)

    def path_mod_marionette(self, path):
        # "<path> <test-name>" -> "<path>"
        path = path.split(' ')[0]
        # "part1\part2" -> "part1/part2"
        path = path.replace('\\', os.path.sep)
        return path

    def path_mod_wpt(self, path):
        if path[0] == os.path.sep:
            # "/<path>" -> "<path>"
            path = path[1:]
        # "<path>" -> "testing/web-platform/tests/<path>"
        path = os.path.join('testing', 'web-platform', 'tests', path)
        # "<path>?<params>" -> "<path>"
        path = path.split('?')[0]
        return path

    def path_mod_jittest(self, path):
        # "part1\part2" -> "part1/part2"
        path = path.replace('\\', os.path.sep)
        # "<path>" -> "js/src/jit-test/tests/<path>"
        return os.path.join('js', 'src', 'jit-test', 'tests', path)

    def path_mod_xpcshell(self, path):
        # <manifest>.ini:<path> -> "<path>"
        path = path.split('.ini:')[-1]
        return path

    def add_activedata(self, branches, days, by_component):
        suites = {
            # List of known suites requiring special path handling and/or
            # suites typically containing thousands of test paths.
            # regexes have been selected by trial and error to partition data
            # into queries returning less than ACTIVEDATA_RECORD_LIMIT records.
            "reftest": (self.path_mod_reftest,
                        [{"regex": {"result.test": "layout/reftests/[a-k].*"}},
                         {"regex": {"result.test": "layout/reftests/[^a-k].*"}},
                         {"not": {"regex": {"result.test": "layout/reftests/.*"}}}]),
            "web-platform-tests": (self.path_mod_wpt,
                                   [{"regex": {"result.test": "/[a-g].*"}},
                                    {"regex": {"result.test": "/[h-p].*"}},
                                    {"not": {"regex": {"result.test": "/[a-p].*"}}}]),
            "web-platform-tests-reftest": (self.path_mod_wpt,
                                           [{"regex": {"result.test": "/css/css-.*"}},
                                            {"not": {"regex": {"result.test": "/css/css-.*"}}}]),
            "jsreftest": (self.path_mod_jsreftest,
                          [{"regex": {"result.test": "[a-g].*"}},
                           {"not": {"regex": {"result.test": "[a-g].*"}}}]),
            "web-platform-tests-wdspec": (self.path_mod_wpt, [None]),
            "web-platform-tests-crashtest": (self.path_mod_wpt, [None]),
            "web-platform-tests-print-reftest": (self.path_mod_wpt, [None]),
            "xpcshell": (self.path_mod_xpcshell, [None]),
            "mochitest-plain": (None, [None]),
            "mochitest-browser-chrome": (None, [None]),
            "mochitest-media": (None, [None]),
            "mochitest-devtools-chrome": (None, [None]),
            "marionette": (self.path_mod_marionette, [None]),
            "mochitest-chrome": (None, [None]),
        }
        unsupported_suites = [
            # Usually these suites are excluded because currently the test resolver
            # does not provide test paths for them.
        ]
        for suite in suites:
            suite_clause = {"eq": {"run.suite.name": suite}}
            path_mod = suites[suite][0]
            test_clauses = suites[suite][1]
            suite_count = 0
            for test_clause in test_clauses:
                label = "%s-%d" % (suite, suite_count)
                suite_count += 1
                self.add_activedata_for_suite(label, branches, days,
                                              suite_clause, test_clause, path_mod)
        # Remainder: All supported suites not handled above.
        suite_clause = {"not": {"in": {"run.suite.name": unsupported_suites + list(suites)}}}
        self.add_activedata_for_suite("remainder", branches, days,
                                      suite_clause, None, None)
        self.collect_activedata_results(by_component)

    def description(self, components, flavor, subsuite, paths,
                    show_manifests, show_tests, show_summary, show_annotations,
                    filter_values, filter_keys,
                    branches, days):
        # provide a natural language description of the report options
        what = []
        if show_manifests:
            what.append("test manifests")
        if show_tests:
            what.append("tests")
        if show_annotations:
            what.append("test manifest annotations")
        if show_summary and len(what) == 0:
            what.append("summary of tests only")
        if len(what) > 1:
            what[-1] = "and " + what[-1]
        what = ", ".join(what)
        d = "Test summary report for " + what
        if components:
            d += ", in specified components (%s)" % components
        else:
            d += ", in all components"
        if flavor:
            d += ", in specified flavor (%s)" % flavor
        if subsuite:
            d += ", in specified subsuite (%s)" % subsuite
        if paths:
            d += ", on specified paths (%s)" % paths
        if filter_values:
            d += ", containing '%s'" % filter_values
            if filter_keys:
                d += " in manifest keys '%s'" % filter_keys
            else:
                d += " in any part of manifest entry"
        if days:
            d += ", including historical run-time data for the last %d days on %s" % (
                days, branches)
        d += " as of %s." % datetime.datetime.now().strftime("%Y-%m-%d %H:%M")
        return d

    def report(self, components, flavor, subsuite, paths,
               show_manifests, show_tests, show_summary, show_annotations,
               filter_values, filter_keys, show_components, output_file,
               branches, days):

        def matches_filters(test):
            '''
            Return True if all of the requested filter_values are found in this test;
            if filter_keys are specified, restrict search to those test keys.
            '''
            for value in filter_values:
                value_found = False
                for key in test:
                    if not filter_keys or key in filter_keys:
                        if re.search(value, test[key]):
                            value_found = True
                            break
                if not value_found:
                    return False
            return True

        start_time = datetime.datetime.now()

        # Ensure useful report by default
        if not show_manifests and not show_tests and not show_summary and not show_annotations:
            show_manifests = True
            show_summary = True

        by_component = {}
        if components:
            components = components.split(',')
        if filter_keys:
            filter_keys = filter_keys.split(',')
        if filter_values:
            filter_values = filter_values.split(',')
        else:
            filter_values = []
        display_keys = (filter_keys or []) + ['skip-if', 'fail-if', 'fails-if']
        display_keys = set(display_keys)

        print("Finding tests...")
        here = os.path.abspath(os.path.dirname(__file__))
        resolver = TestResolver.from_environment(cwd=here, loader_cls=TestManifestLoader)
        tests = list(resolver.resolve_tests(paths=paths, flavor=flavor,
                                            subsuite=subsuite))

        manifest_paths = set()
        for t in tests:
            if 'manifest' in t and t['manifest'] is not None:
                manifest_paths.add(t['manifest'])
        manifest_count = len(manifest_paths)
        print("Resolver found {} tests, {} manifests".format(len(tests), manifest_count))

        if show_manifests:
            topsrcdir = self.build_obj.topsrcdir
            by_component['manifests'] = {}
            manifest_paths = list(manifest_paths)
            manifest_paths.sort()
            relpaths = []
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode='empty')
            files_info = reader.files_info(relpaths)
            for manifest_path in manifest_paths:
                relpath = mozpath.relpath(manifest_path, topsrcdir)
                if mozpath.commonprefix((manifest_path, topsrcdir)) != topsrcdir:
                    continue
                manifest_info = None
                if relpath in files_info:
                    bug_component = files_info[relpath].get('BUG_COMPONENT')
                    if bug_component:
                        key = "{}::{}".format(bug_component.product, bug_component.component)
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        manifest_info = {
                            'manifest': relpath,
                            'tests': 0,
                            'skipped': 0,
                        }
                        rkey = key if show_components else 'all'
                        if rkey in by_component['manifests']:
                            by_component['manifests'][rkey].append(manifest_info)
                        else:
                            by_component['manifests'][rkey] = [manifest_info]
                if manifest_info:
                    for t in tests:
                        if t['manifest'] == manifest_path:
                            manifest_info['tests'] += 1
                            if t.get('skip-if'):
                                manifest_info['skipped'] += 1
            for key in by_component['manifests']:
                by_component['manifests'][key].sort()

        if show_tests:
            by_component['tests'] = {}

        if show_tests or show_summary or show_annotations:
            test_count = 0
            failed_count = 0
            skipped_count = 0
            annotation_count = 0
            condition_count = 0
            component_set = set()
            conditions = {}
            relpaths = []
            known_unconditional_annotations = ['skip', 'fail', 'asserts', 'random']
            known_conditional_annotations = ['skip-if', 'fail-if', 'run-if',
                                             'fails-if', 'fuzzy-if', 'random-if', 'asserts-if']
            for t in tests:
                relpath = t.get('srcdir_relpath')
                relpaths.append(relpath)
            reader = self.build_obj.mozbuild_reader(config_mode='empty')
            files_info = reader.files_info(relpaths)
            for t in tests:
                if not matches_filters(t):
                    continue
                if 'referenced-test' in t:
                    # Avoid double-counting reftests: disregard reference file entries
                    continue
                if show_annotations:
                    for key in t:
                        if key in known_unconditional_annotations:
                            annotation_count += 1
                        if key in known_conditional_annotations:
                            annotation_count += 1
                            # Here 'key' is a manifest annotation type like 'skip-if' and t[key]
                            # is the associated condition. For example, the manifestparser
                            # manifest annotation, "skip-if = os == 'win'", is expected to be
                            # encoded as t['skip-if'] = "os == 'win'".
                            # To allow for reftest manifests, t[key] may have multiple entries
                            # separated by ';', each corresponding to a condition for that test
                            # and annotation type. For example,
                            # "skip-if(Android&&webrender) skip-if(OSX)", would be
                            # encoded as t['skip-if'] = "Android&&webrender;OSX".
                            annotation_conditions = t[key].split(';')
                            for condition in annotation_conditions:
                                condition_count += 1
                                # Trim reftest fuzzy-if ranges: everything after the first comma
                                # eg. "Android,0-2,1-3" -> "Android"
                                condition = condition.split(',')[0]
                                if condition not in conditions:
                                    conditions[condition] = 0
                                conditions[condition] += 1
                relpath = t.get('srcdir_relpath')
                if relpath in files_info:
                    bug_component = files_info[relpath].get('BUG_COMPONENT')
                    if bug_component:
                        key = "{}::{}".format(bug_component.product, bug_component.component)
                    else:
                        key = "<unknown bug component>"
                    if (not components) or (key in components):
                        component_set.add(key)
                        test_count += 1
                        test_info = {'test': relpath}
                        for test_key in display_keys:
                            value = t.get(test_key)
                            if value:
                                test_info[test_key] = value
                        if t.get('fail-if'):
                            failed_count += 1
                        if t.get('fails-if'):
                            failed_count += 1
                        if t.get('skip-if'):
                            skipped_count += 1
                        if show_tests:
                            rkey = key if show_components else 'all'
                            if rkey in by_component['tests']:
                                # Avoid duplicates: Some test paths have multiple TestResolver
                                # entries, as when a test is included by multiple manifests.
                                found = False
                                for ctest in by_component['tests'][rkey]:
                                    if ctest['test'] == test_info['test']:
                                        found = True
                                        break
                                if not found:
                                    by_component['tests'][rkey].append(test_info)
                            else:
                                by_component['tests'][rkey] = [test_info]
            if show_tests:
                for key in by_component['tests']:
                    by_component['tests'][key].sort(key=lambda k: k['test'])

        if days:
            try:
                self.add_activedata(branches, days, by_component)
            except Exception:
                print("Failed to retrieve some ActiveData data.")
                traceback.print_exc()
            self.log_verbose("%d tests updated with matching ActiveData data" %
                             self.total_activedata_matches)
            self.log_verbose("%d seconds waiting for ActiveData" %
                             self.total_activedata_seconds)

        by_component['description'] = self.description(
            components, flavor, subsuite, paths,
            show_manifests, show_tests, show_summary, show_annotations,
            filter_values, filter_keys,
            branches, days)

        if show_summary:
            by_component['summary'] = {}
            by_component['summary']['components'] = len(component_set)
            by_component['summary']['manifests'] = manifest_count
            by_component['summary']['tests'] = test_count
            by_component['summary']['failed tests'] = failed_count
            by_component['summary']['skipped tests'] = skipped_count

        if show_annotations:
            by_component['annotations'] = {}
            by_component['annotations']['total annotations'] = annotation_count
            by_component['annotations']['total conditions'] = condition_count
            by_component['annotations']['unique conditions'] = len(conditions)
            by_component['annotations']['conditions'] = conditions

        self.write_report(by_component, output_file)

        end_time = datetime.datetime.now()
        self.log_verbose("%d seconds total to generate report" %
                         (end_time - start_time).total_seconds())

    def write_report(self, by_component, output_file):
        json_report = json.dumps(by_component, indent=2, sort_keys=True)
        if output_file:
            output_file = os.path.abspath(output_file)
            output_dir = os.path.dirname(output_file)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)
            with open(output_file, 'w') as f:
                f.write(json_report)
        else:
            print(json_report)

    def report_diff(self, before, after, output_file):
        """
        Support for 'mach test-info report-diff'.
        """

        def get_file(path_or_url):
            if urlparse.urlparse(path_or_url).scheme:
                response = requests.get(path_or_url)
                response.raise_for_status()
                return json.loads(response.text)
            with open(path_or_url) as f:
                return json.load(f)

        report1 = get_file(before)
        report2 = get_file(after)

        by_component = {'tests': {}, 'summary': {}}
        self.diff_summaries(by_component, report1["summary"], report2["summary"])
        self.diff_all_components(by_component, report1["tests"], report2["tests"])
        self.write_report(by_component, output_file)

    def diff_summaries(self, by_component, summary1, summary2):
        """
        Update by_component with comparison of summaries.
        """
        all_keys = set(summary1.keys()) | set(summary2.keys())
        for key in all_keys:
            delta = summary2.get(key, 0) - summary1.get(key, 0)
            by_component['summary']['%s delta' % key] = delta

    def diff_all_components(self, by_component, tests1, tests2):
        """
        Update by_component with any added/deleted tests, for all components.
        """
        self.added_count = 0
        self.deleted_count = 0
        for component in tests1:
            component1 = tests1[component]
            component2 = [] if component not in tests2 else tests2[component]
            self.diff_component(by_component, component, component1, component2)
        for component in tests2:
            if component not in tests1:
                component2 = tests2[component]
                self.diff_component(by_component, component, [], component2)
        by_component['summary']['added tests'] = self.added_count
        by_component['summary']['deleted tests'] = self.deleted_count

    def diff_component(self, by_component, component, component1, component2):
        """
        Update by_component[component] with any added/deleted tests for the
        named component.
        "added": tests found in component2 but missing from component1.
        "deleted": tests found in component1 but missing from component2.
        """
        tests1 = set([t['test'] for t in component1])
        tests2 = set([t['test'] for t in component2])
        deleted = tests1 - tests2
        added = tests2 - tests1
        if deleted or added:
            by_component['tests'][component] = {}
            if deleted:
                by_component['tests'][component]['deleted'] = sorted(list(deleted))
            if added:
                by_component['tests'][component]['added'] = sorted(list(added))
        self.added_count += len(added)
        self.deleted_count += len(deleted)
        common = len(tests1.intersection(tests2))
        self.log_verbose("%s: %d deleted, %d added, %d common" % (component, len(deleted),
                                                                  len(added), common))