1 # tko/nightly.py code shared by various tko/*.cgi graphing scripts
6 from autotest_lib
.tko
import db
, plotgraph
, perf
7 from autotest_lib
.client
.common_lib
import kernel_versions
def add_kernel_jobs(label_pattern):
    """Return the job_idx of every tko job whose label matches label_pattern.

    label_pattern is an SQL 'like' pattern, typically taken from a CGI
    argument of the graphing scripts.
    """
    # Parameterized query: label_pattern arrives from an untrusted CGI arg,
    # so let the db driver quote it instead of splicing it into the SQL text
    # (same execute(cmd, args) form already used by collect_testruns).
    cmd = "select job_idx from tko_jobs where label like %s"
    nrows = perf.db_cur.execute(cmd, [label_pattern])
    return [row[0] for row in perf.db_cur.fetchall()]
def is_filtered_platform(platform, platforms_filter):
    """True if this platform passes the user's platform filter.

    platforms_filter is a list of platform-name prefixes; an empty
    filter admits every platform.
    """
    if not platforms_filter:
        return True
    return any(platform.startswith(p) for p in platforms_filter)
def get_test_attributes(testrunx):
    """Fetch the attribute/value pairs recorded for one test run.

    Returns a dict mapping attribute name -> value string.
    """
    query = ("select attribute, value from tko_test_attributes"
             " where test_idx = %d" % testrunx)
    perf.db_cur.execute(query)
    rows = perf.db_cur.fetchall()
    return dict(rows)
def get_antag(testrunx):
    """Return the run's 'antag' attribute (antagonist test name), or None."""
    return get_test_attributes(testrunx).get('antag', None)
def matching_test_attributes(attrs, required_test_attributes):
    """True if a run's attributes are consistent with the required ones.

    Only keys present in both dicts are compared; the run is accepted
    unless some shared key has a conflicting value.  An empty
    requirements dict matches any run.
    """
    if not required_test_attributes:
        return True
    # all() over an empty generator is True, preserving the old
    # min(matches+[True]) behavior when the key sets are disjoint.
    return all(attrs[key] == required_test_attributes[key]
               for key in attrs if key in required_test_attributes)
45 def collect_testruns(jobs
, test
, test_attributes
,
46 platforms_filter
, by_hosts
, no_antag
):
47 # get test_runs run #s for 1 test on 1 kernel and some platforms
48 # TODO: Is jobs list short enough to use directly in 1 sql cmd?
49 # TODO: add filtering on test series?
50 runs
= {} # platform --> list of test runs
52 cmd
= ( "select test_idx, machine_idx from tko_tests"
53 " where job_idx = %s and test = %s" )
55 nrows
= perf
.db_cur
.execute(cmd
, args
)
56 for testrunx
, machx
in perf
.db_cur
.fetchall():
57 platform
, host
= perf
.machine_idx_to_platform_host(machx
)
60 if ( is_filtered_platform(platform
, platforms_filter
) and
61 matching_test_attributes(get_test_attributes(testrunx
),
63 (not no_antag
or get_antag(testrunx
) == '') ):
64 runs
.setdefault(platform
, []).append(testrunx
)
def all_tested_platforms(test_runs):
    """Return a sorted list of every platform appearing in test_runs.

    test_runs maps kernel -> (platform -> list of test run indexes).
    """
    platforms = set()
    for kernel in test_runs:
        platforms.update(test_runs[kernel])   # dict keys are platform names
    return sorted(platforms)
76 def divide_twoway_testruns(test_runs
, platform
):
77 # partition all twoway runs based on name of antagonist progs
80 for kernel
in test_runs
:
82 for testrunx
in test_runs
[kernel
].get(platform
, []):
83 antag
= get_antag(testrunx
)
85 runs
.setdefault(antag
, []).append(testrunx
)
86 antagonists
.add(antag
)
87 twoway_runs
[kernel
] = runs
88 return twoway_runs
, sorted(antagonists
)
def collect_raw_scores(runs, metric):
    """Collect unscaled scores of one test's runs, arranged by platform.

    runs maps platform -> list of test run indexes.
    Platforms yielding no scores for this metric are omitted from the
    result.  Returns platform -> list of perf scores.
    """
    platform_scores = {}   # platform --> list of perf scores
    for platform in runs:
        vals = perf.get_metric_at_point(runs[platform], metric)
        if vals:
            platform_scores[platform] = vals
    return platform_scores
102 def collect_scaled_scores(metric
, test_runs
, regressed_platforms
, relative
):
103 # get scores of test runs for 1 test on some kernels and platforms
104 # optionally make relative to oldest (?) kernel on that platform
105 # arrange by plotline (ie platform) for gnuplot
106 plot_data
= {} # platform --> (kernel --> list of perf scores)
108 for kernel
in sorted(test_runs
.keys()):
109 for platform
in test_runs
[kernel
]:
110 if not (regressed_platforms
is None or
111 platform
in regressed_platforms
):
112 continue # delete results for uninteresting platforms
113 vals
= perf
.get_metric_at_point(test_runs
[kernel
][platform
],
117 if platform
not in baseline
:
118 baseline
[platform
], std
= plotgraph
.avg_dev(vals
)
119 vals
= [v
/baseline
[platform
] for v
in vals
]
120 pdp
= plot_data
.setdefault(platform
, {})
121 pdp
.setdefault(kernel
, []).extend(vals
)
125 def collect_twoway_scores(metric
, antagonists
, twoway_runs
, relative
):
128 for kernel
in twoway_runs
:
129 for test2
in antagonists
:
130 runs
= twoway_runs
[kernel
].get(test2
, [])
131 vals
= perf
.get_metric_at_point(runs
, metric
)
132 plot_data
.setdefault(test2
, {})
134 plot_data
[test2
][kernel
] = vals
136 vals
= plot_data
[alone
].get(kernel
, [])
138 baseline
= perf
.average(vals
)
139 for test2
in antagonists
:
140 vals
= plot_data
[test2
].get(kernel
, [])
141 vals
= [val
/baseline
for val
in vals
]
143 plot_data
[test2
][kernel
] = vals
145 for test2
in antagonists
:
146 if kernel
in plot_data
[test2
]:
147 del plot_data
[test2
][kernel
]
151 def find_regressions(kernels
, test_runs
, metric
):
152 # A test is regressed on some platform if its latest results are
153 # definitely lower than on the reference kernel.
154 # Runs for the latest kernel may be underway and incomplete.
155 # In that case, selectively use next-latest kernel.
156 # TODO: the next-latest method hurts if latest run is not sorted last,
157 # or if there are several dev threads
160 prev
= kernels
[-2:][0]
161 scores
= {} # kernel --> (platform --> list of perf scores)
162 for k
in [ref
, prev
, latest
]:
164 scores
[k
] = collect_raw_scores(test_runs
[k
], metric
)
165 regressed_platforms
= []
166 for platform
in scores
[ref
]:
167 if latest
in scores
and platform
in scores
[latest
]:
169 elif prev
in scores
and platform
in scores
[prev
]:
171 else: # perhaps due to decay of test machines
172 k
= ref
# no regression info avail
173 ref_avg
, ref_std
= plotgraph
.avg_dev(scores
[ref
][platform
])
174 avg
, std
= plotgraph
.avg_dev(scores
[ k
][platform
])
175 if avg
+std
< ref_avg
-ref_std
:
176 regressed_platforms
.append(platform
)
177 return sorted(regressed_platforms
)
def get_testrun_context(testrun):
    """Look up the job/test context of one test run.

    Returns [job label, job tag, test subdir, started_time formatted
    as 'mm/dd/yy HH:MM'].
    """
    cmd = ('select tko_jobs.label, tko_jobs.tag, tko_tests.subdir,'
           ' tko_tests.started_time'
           ' from tko_jobs, tko_tests'
           ' where tko_jobs.job_idx = tko_tests.job_idx'
           ' and tko_tests.test_idx = %d' % testrun)
    nrows = perf.db_cur.execute(cmd)
    assert nrows == 1   # test_idx is a unique key, so exactly one row
    row = perf.db_cur.fetchone()
    row = [row[0], row[1], row[2], row[3].strftime('%m/%d/%y %H:%M')]
    return row
194 print "Content-Type: text/html\n\n<html><body>"
def abs_rel_link(myurl, passthru):
    """Return an html link that redraws the current page with the
    opposite absolute/relative performance choice.

    passthru is the list of cgi args to carry over; it is copied, not
    mutated.
    """
    mod_passthru = passthru[:]
    if 'absolute' in passthru:
        mod_passthru.remove('absolute')
        opposite = 'relative'
    else:
        mod_passthru.append('absolute')
        opposite = 'absolute'
    url = '%s?%s' % (myurl, '&'.join(mod_passthru))
    return "<a href='%s'> %s </a>" % (url, opposite)
210 def table_1_metric_all_kernels(plot_data
, columns
, column_argname
,
211 kernels
, kernel_dates
,
212 myurl
, filtered_passthru
):
213 # generate html table of graph's numbers
214 # for 1 benchmark metric over all kernels (rows),
215 # over various platforms or various antagonists etc (cols).
217 print "<table border=1 cellpadding=3 cellspacing=0>"
218 print "<tr> <td><b> Kernel </b></td>",
219 for label
in columns
:
220 if not label
and column_argname
== 'antag':
222 print "<td><b>", label
.replace('_', '<br>_'), "</b></td>"
224 for kernel
in kernels
:
225 print "<tr> <td><b>", kernel
, "</b>",
226 if kernel
in kernel_dates
:
227 print "<br><small>", kernel_dates
[kernel
], "</small>"
231 vals
= plot_data
[col
].get(kernel
, [])
235 (avg
, std_dev
) = plotgraph
.avg_dev(vals
)
236 if col
not in ref_thresholds
:
237 ref_thresholds
[col
] = avg
- std_dev
238 if avg
+std_dev
< ref_thresholds
[col
]:
239 print "bgcolor=pink",
241 args
= filtered_passthru
[:]
242 perf
.append_cgi_args(args
,
243 {column_argname
:col
, 'kernel':kernel
})
244 print "<a href='%s?%s&runs&attrs'>" % (myurl
,
246 print "<b>%.4g</b>" % avg
, "</a><br>",
247 print " <small> %dr </small>" % len(vals
),
248 print " <small> %.3g </small>" % std_dev
,
252 print "<p> <b>Bold value:</b> Average of this metric, then <br>"
253 print "number of good test runs, then standard deviation of those runs"
254 print "<br> Pink if regressed from reference kernel"
257 def table_all_metrics_1_platform(test_runs
, platform
, relative
):
258 # TODO: show std dev in cells
259 # can't mark regressions, since some metrics improve downwards
260 kernels
= perf
.sort_kernels(test_runs
.keys())
263 for kernel
in kernels
:
264 testruns
= test_runs
[kernel
].get(platform
, [])
266 d
= perf
.collect_all_metrics_scores(testruns
)
268 attrs
.update(set(d
.keys()))
270 print "No runs completed on", kernel
, "<br>"
271 attrs
= sorted(list(attrs
))[:100]
273 print "<table border=1 cellpadding=4 cellspacing=0>"
274 print "<tr><td> Metric </td>"
275 for kernel
in kernels
:
276 kernel
= kernel
.replace("_", "_<br>")
277 print "<td>", kernel
, "</td>"
281 print "<td>", attr
, "</td>"
283 for kernel
in kernels
:
285 if kernel
in scores
and attr
in scores
[kernel
]:
286 (avg
, dev
) = plotgraph
.avg_dev(scores
[kernel
][attr
])
287 if baseline
and relative
:
288 percent
= (avg
/baseline
- 1)*100
289 print "%+.1f%%" % percent
,
300 def table_variants_all_tests(plot_data
, columns
, colkeys
, benchmarks
,
301 myurl
, filtered_passthru
):
302 # generate html table of graph's numbers
303 # for primary metric over all benchmarks (rows),
304 # on one platform and one kernel,
305 # over various combos of test run attribute constraints (cols).
307 print "<table border=1 cellpadding=3 cellspacing=0>"
308 print "<tr> <td><b> Benchmark </b></td>",
310 print "<td><b>", colkeys
[col
].replace(',', ',<br>'), "</b></td>"
312 for benchmark
in benchmarks
:
313 print "<tr> <td><b>", benchmark
, "</b></td>"
316 vals
= plot_data
[col
].get(benchmark
, [])
320 (avg
, std_dev
) = plotgraph
.avg_dev(vals
)
321 args
= filtered_passthru
[:]
322 perf
.append_cgi_args(args
, {'test':benchmark
})
323 for keyval
in colkeys
[col
].split(','):
324 key
, val
= keyval
.split('=', 1)
325 perf
.append_cgi_args(args
, {key
:val
})
326 print "<a href='%s?%s&runs&attrs'>" % (myurl
,
328 print "<b>%.4g</b>" % avg
, "</a><br>",
329 print " <small> %dr </small>" % len(vals
),
330 print " <small> %.3g </small>" % std_dev
,
334 print "<p> <b>Bold value:</b> Average of this metric, then <br>"
335 print "number of good test runs, then standard deviation of those runs"
338 def table_testrun_details(runs
, metric
, tko_server
, show_attrs
):
339 print "<table border=1 cellpadding=4 cellspacing=0>"
340 print "<tr><td> %s metric </td>" % metric
341 print "<td> Job label </td> <td> Job tag </td> <td> Run results </td>"
342 print "<td> Started_time </td>"
344 print "<td> Test attributes </td>"
347 for testrunx
in runs
:
349 vals
= perf
.get_metric_at_point([testrunx
], metric
)
351 print "%.4g " % v
,
353 row
= get_testrun_context(testrunx
)
354 row
[2] = ( "<a href='//%s/results/%s/%s/results/keyval'> %s </a>"
355 % (tko_server
, row
[1], row
[2], row
[2]) )
357 print "<td> %s </td>" % v
359 attrs
= get_test_attributes(testrunx
)
361 for attr
in sorted(attrs
.keys()):
362 if attr
== "sysinfo-cmdline": continue
363 if attr
[:4] == "svs-": continue
366 val
= val
[:40-3] + "..."
367 print "%s=%s " % (attr
, val
)
373 def overview_thumb(test
, metric
, myurl
, passthru
):
374 pass_
= passthru
+ ['test=%s' % test
]
376 pass_
+= ['metric=%s' % metric
]
377 pass_
= '&'.join(pass_
)
378 print "<a href='%s?%s&table'>" % (myurl
, pass_
)
379 print " <img src='%s?%s&size=450,500'> </a>" % (myurl
, pass_
)
380 # embedded graphs fit 3 across on 1400x1050 laptop
383 def graph_1_test(title
, metric
, plot_data
, line_argname
, lines
,
384 kernel_legend
, relative
, size
, dark
=False):
385 # generate graph image for one benchmark, showing avg and
386 # std dev of one metric, over various kernels (X columns),
387 # over various platforms or antagonists etc (graphed lines)
388 xlegend
= kernel_legend
389 ylegend
= metric
.capitalize()
391 ylegend
+= ', Relative'
396 keytitle
= line_argname
.capitalize() + ':'
399 graph
= plotgraph
.gnuplot(title
, xlegend
, ylegend
, size
=size
,
400 xsort
=perf
.sort_kernels
, keytitle
=keytitle
)
403 if not label
and line_argname
== 'antag':
405 graph
.add_dataset(label
, plot_data
[line
])
406 graph
.plot(cgi_header
=True, ymin
=ymin
, dark
=dark
)
409 def graph_variants_all_tests(title
, plot_data
, linekeys
, size
, dark
):
410 # generate graph image showing all benchmarks
411 # on one platform and one kernel,
412 # over various combos of test run attribute constraints (lines).
413 xlegend
= "Benchmark"
414 ylegend
= "Relative Perf"
415 graph
= plotgraph
.gnuplot(title
, xlegend
, ylegend
, size
=size
)
417 graph
.add_dataset(linekeys
[i
], plot_data
[i
])
418 graph
.plot(cgi_header
=True, dark
=dark
, ymin
=0.8)
421 class generate_views(object):
424 def __init__(self
, kernel_legend
, benchmarks
, test_group
,
425 site_benchmark_metrics
, tko_server
,
426 jobs_selector
, no_antag
):
427 self
.kernel_legend
= kernel_legend
428 self
.benchmarks
= benchmarks
429 self
.test_group
= test_group
430 self
.tko_server
= tko_server
431 self
.jobs_selector
= jobs_selector
432 self
.no_antag
= no_antag
435 test
, antagonists
= self
.parse_most_cgi_args()
437 perf
.init(tko_server
=tko_server
)
438 for b
in site_benchmark_metrics
:
439 perf
.add_benchmark_main_metric(b
, site_benchmark_metrics
[b
])
441 self
.test_runs
= {} # kernel --> (platform --> list of test runs)
442 self
.job_table
= {} # kernel id --> list of job idxs
443 self
.kernel_dates
= {} # kernel id --> date of nightly test
445 vary
= self
.cgiform
.getlist('vary')
447 platform
= self
.platforms_filter
[0]
448 self
.analyze_variants_all_tests_1_platform(platform
, vary
)
450 self
.analyze_1_test(test
, antagonists
)
452 self
.overview_page_all_tests(self
.benchmarks
, antagonists
)
455 def collect_all_testruns(self
, trimmed_kernels
, test
):
456 # get test_runs run #s for 1 test on some kernels and platforms
457 for kernel
in trimmed_kernels
:
458 runs
= collect_testruns(self
.job_table
[kernel
], test
,
459 self
.test_attributes
, self
.platforms_filter
,
460 'by_hosts' in self
.toggles
, self
.no_antag
)
462 self
.test_runs
[kernel
] = runs
465 def table_for_graph_1_test(self
, title
, metric
, plot_data
,
466 column_argname
, columns
, filtered_passthru
):
467 # generate detailed html page with 1 graph and corresp numbers
468 # for 1 benchmark metric over all kernels (rows),
469 # over various platforms or various antagonists etc (cols).
471 print '<h3> %s </h3>' % title
472 print ('%s, machine group %s on //%s server <br>' %
473 (self
.kernel_legend
, self
.test_group
, self
.tko_server
))
475 print '%s test script series <br>' % self
.test_tag
[1:]
477 print "<img src='%s?%s'>" % (self
.myurl
, '&'.join(self
.passthru
))
479 link
= abs_rel_link(self
.myurl
, self
.passthru
+['table'])
480 print "<p><p> <h4> Redraw this with %s performance? </h4>" % link
482 heading
= "%s, %s metric" % (title
, metric
)
484 heading
+= ", relative"
485 print "<p><p> <h3> %s: </h3>" % heading
486 table_1_metric_all_kernels(plot_data
, columns
, column_argname
,
487 self
.kernels
, self
.kernel_dates
,
488 self
.myurl
, filtered_passthru
)
489 print "</body></html>"
492 def graph_1_test_all_platforms(self
, test
, metric
, platforms
, plot_data
):
493 # generate graph image for one benchmark
494 title
= test
.capitalize()
495 if 'regress' in self
.toggles
:
496 title
+= ' Regressions'
497 if 'table' in self
.cgiform
:
498 self
.table_for_graph_1_test(title
, metric
, plot_data
,
499 'platforms', platforms
,
500 filtered_passthru
=self
.passthru
)
502 graph_1_test(title
, metric
, plot_data
, 'platforms', platforms
,
503 self
.kernel_legend
, self
.relative
,
504 self
.size
, 'dark' in self
.toggles
)
507 def testrun_details(self
, title
, runs
, metric
):
509 print '<h3> %s </h3>' % title
510 print ('%s, machine group %s on //%s server' %
511 (self
.kernel_legend
, self
.test_group
, self
.tko_server
))
513 print '<br> %s test script series' % self
.test_tag
[1:]
515 table_testrun_details(runs
, metric
,
516 self
.tko_server
, 'attrs' in self
.cgiform
)
517 print "</body></html>"
520 def testrun_details_for_1_test_kernel_platform(self
, test
,
522 default_kernel
= min(self
.test_runs
.keys())
523 kernel
= self
.cgiform
.getvalue('kernel', default_kernel
)
524 title
= '%s on %s using %s' % (test
.capitalize(), platform
, kernel
)
525 runs
= self
.test_runs
[kernel
].get(platform
, [])
526 self
.testrun_details(title
, runs
, metric
)
529 def analyze_1_metric_all_platforms(self
, test
, metric
):
530 if 'regress' in self
.toggles
:
531 regressed_platforms
= find_regressions(self
.kernels
, self
.test_runs
,
534 regressed_platforms
= None
535 plot_data
= collect_scaled_scores(metric
, self
.test_runs
,
536 regressed_platforms
, self
.relative
)
537 platforms
= sorted(plot_data
.keys())
541 elif 'runs' in self
.cgiform
:
542 self
.testrun_details_for_1_test_kernel_platform(test
, metric
,
545 self
.graph_1_test_all_platforms(test
, metric
, platforms
, plot_data
)
548 def analyze_all_metrics_1_platform(self
, test
, platform
):
549 # TODO: show #runs in header
551 heading
= "%s %s:  %s" % (self
.test_group
, self
.kernel_legend
,
553 print "<h2> %s </h2>" % heading
554 print "platform=%s <br>" % platform
555 for attr
in self
.test_attributes
:
556 print "%s=%s " % (attr
, self
.test_attributes
[attr
])
558 table_all_metrics_1_platform(self
.test_runs
, platform
, self
.relative
)
559 print "</body></html>"
562 def table_for_variants_all_tests(self
, title
, plot_data
, colkeys
, columns
,
563 filtered_passthru
, test_tag
):
564 # generate detailed html page with 1 graph and corresp numbers
565 # for primary metric over all benchmarks (rows),
566 # on one platform and one kernel,
567 # over various combos of test run attribute constraints (cols).
569 print '<h3> %s </h3>' % title
570 print ('%s, machine group %s on //%s server <br>' %
571 (self
.kernel_legend
, self
.test_group
, self
.tko_server
))
573 print '%s test script series <br>' % test_tag
[1:]
575 varies
= ['vary='+colkeys
[col
] for col
in columns
]
576 print "<img src='%s?%s'>" % (self
.myurl
, '&'.join(self
.passthru
+varies
))
578 print "<p><p> <h3> %s: </h3>" % title
579 table_variants_all_tests(plot_data
, columns
, colkeys
, self
.benchmarks
,
580 self
.myurl
, filtered_passthru
)
581 print "</body></html>"
584 def analyze_variants_all_tests_1_platform(self
, platform
, vary
):
585 # generate one graph image for results of all benchmarks
586 # on one platform and one kernel, comparing effects of
587 # two or more combos of kernel options (test run attributes)
588 # (numa_fake,stale_page,kswapd_merge,sched_idle, etc)
589 kernel
= self
.cgiform
.getvalue('kernel', 'some_kernel')
590 self
.passthru
.append('kernel=%s' % kernel
)
592 # two or more vary_groups, one for each plotted line,
593 # each group begins with vary= and ends with next &
594 # each group has comma-separated list of test attribute key=val pairs
595 # eg vary=keyval1,keyval2&vary=keyval3,keyval4
596 vary_groups
= [dict(pair
.split('=',1) for pair
597 in vary_group
.split(','))
598 for vary_group
in vary
]
600 test
= self
.benchmarks
[0] # pick any test in all jobs
601 kernels
, test_tag
= self
.jobs_selector(test
, self
.job_table
,
607 for i
, vary_group
in enumerate(vary_groups
):
608 group_attributes
= self
.test_attributes
.copy()
609 group_attributes
.update(vary_group
)
610 linekey
= ','.join('%s=%s' % (attr
, vary_group
[attr
])
611 for attr
in vary_group
)
612 linekeys
[i
] = linekey
614 for benchmark
in self
.benchmarks
:
615 metric
= perf
.benchmark_main_metric(benchmark
)
616 runs
= collect_testruns(self
.job_table
[kernel
],
619 self
.platforms_filter
,
620 'by_hosts' in self
.toggles
,
623 for testrunx
in runs
[platform
]:
624 vals
+= perf
.get_metric_at_point([testrunx
], metric
)
626 if benchmark
not in baselines
:
627 baselines
[benchmark
], stddev
= plotgraph
.avg_dev(vals
)
628 vals
= [val
/baselines
[benchmark
] for val
in vals
]
629 data
[benchmark
] = vals
632 title
= "%s on %s" % (kernel
, platform
)
633 for attr
in self
.test_attributes
:
634 title
+= ', %s=%s' % (attr
, self
.test_attributes
[attr
])
635 if 'table' in self
.cgiform
:
636 self
.table_for_variants_all_tests(title
, plot_data
, linekeys
,
637 range(len(linekeys
)),
638 filtered_passthru
=self
.passthru
,
641 graph_variants_all_tests(title
, plot_data
, linekeys
,
642 self
.size
, 'dark' in self
.toggles
)
645 def graph_twoway_antagonists_1_test_1_platform(
646 self
, test
, metric
, platform
, antagonists
, twoway_runs
):
647 # generate graph of one benchmark's performance paired with
648 # various antagonists, with one plotted line per antagonist,
649 # over most kernels (X axis), all on one machine type
650 # performance is relative to the no-antag baseline case
651 plot_data
= collect_twoway_scores(metric
, antagonists
,
652 twoway_runs
, self
.relative
)
653 title
= "%s vs. an Antagonist on %s:" % (test
.capitalize(), platform
)
654 if 'table' in self
.cgiform
:
655 filtered_passthru
= [arg
for arg
in self
.passthru
656 if not arg
.startswith('antag=')]
657 self
.table_for_graph_1_test(title
, metric
, plot_data
,
658 'antag', antagonists
,
659 filtered_passthru
=filtered_passthru
)
661 graph_1_test(title
, metric
, plot_data
, 'antag', antagonists
,
662 self
.kernel_legend
, self
.relative
,
663 self
.size
, 'dark' in self
.toggles
)
666 def testrun_details_for_twoway_test(self
, test
, metric
, platform
,
667 antagonist
, twoway_runs
):
668 default_kernel
= min(twoway_runs
.keys())
669 kernel
= self
.cgiform
.getvalue('kernel', default_kernel
)
670 title
= '%s vs. Antagonist %s on %s using %s' % (
671 test
.capitalize(), antagonist
.capitalize(), platform
, kernel
)
672 runs
= twoway_runs
[kernel
].get(antagonist
, [])
673 self
.testrun_details(title
, runs
, metric
)
676 def analyze_twoway_antagonists_1_test_1_platform(
677 self
, test
, metric
, platform
, antagonists
):
678 twoway_runs
, all_antagonists
= divide_twoway_testruns(self
.test_runs
,
680 if antagonists
== ['*']:
681 antagonists
= all_antagonists
685 elif 'runs' in self
.cgiform
:
686 self
.testrun_details_for_twoway_test(
687 test
, metric
, platform
, antagonists
[0], twoway_runs
)
689 self
.graph_twoway_antagonists_1_test_1_platform(
690 test
, metric
, platform
, antagonists
, twoway_runs
)
693 def get_twoway_default_platform(self
):
694 if self
.platforms_filter
:
695 return self
.platforms_filter
[0]
697 kernels
, test_tag
= self
.jobs_selector(test
, self
.job_table
,
699 self
.collect_all_testruns(kernels
, test
+test_tag
)
700 return all_tested_platforms(self
.test_runs
)[0]
703 def overview_page_all_tests(self
, benchmarks
, antagonists
):
704 # generate overview html page with small graphs for each benchmark
705 # linking to detailed html page for that benchmark
706 # recursively link to this same cgi to generate each image
708 if antagonists
is not None:
709 heading
= ('Twoway Container Isolation using %s on %s' %
710 (self
.kernel_legend
, self
.get_twoway_default_platform()))
712 heading
= '%s, %s Benchmarks' % (self
.kernel_legend
,
714 if 'regress' in self
.toggles
:
715 heading
+= ", Regressions Only"
716 print "<h3> %s </h3>" % heading
717 for test
in benchmarks
:
718 overview_thumb(test
, '', self
.myurl
, self
.passthru
)
719 if test
== 'unixbench':
720 overview_thumb('unixbench', 'Process_creation',
721 self
.myurl
, self
.passthru
)
723 link
= abs_rel_link(self
.myurl
, self
.passthru
)
724 print "<p><p> <h4> Redraw this with %s performance? </h4>" % link
725 print "</body></html>"
728 def analyze_1_test(self
, test
, antagonists
):
729 self
.passthru
.append('test=%s' % test
)
730 metric
= self
.cgiform
.getvalue('metric', '')
732 self
.passthru
.append('metric=%s' % metric
)
734 metric
= perf
.benchmark_main_metric(test
)
735 assert metric
, "no default metric for test %s" % test
736 self
.kernels
, self
.test_tag
= self
.jobs_selector(test
, self
.job_table
,
738 self
.collect_all_testruns(self
.kernels
, test
+self
.test_tag
)
739 if not self
.platforms_filter
and (metric
== '*' or
740 antagonists
is not None):
741 # choose default platform
742 self
.platforms_filter
= all_tested_platforms(self
.test_runs
)[0:1]
743 self
.passthru
.append('platforms=%s' %
744 ','.join(self
.platforms_filter
))
745 if antagonists
is not None:
746 antagonists
= antagonists
.split(',')
747 if len(antagonists
) == 1 and antagonists
!= ['*']:
748 self
.relative
= False
749 self
.analyze_twoway_antagonists_1_test_1_platform(
750 test
, metric
, self
.platforms_filter
[0], antagonists
)
752 platform
= self
.platforms_filter
[0]
753 self
.analyze_all_metrics_1_platform(test
, platform
)
755 self
.analyze_1_metric_all_platforms(test
, metric
)
758 def parse_most_cgi_args(self
):
759 self
.myurl
= os
.path
.basename(sys
.argv
[0])
760 self
.cgiform
= cgi
.FieldStorage(keep_blank_values
=True)
761 self
.size
= self
.cgiform
.getvalue('size', '1200,850')
762 all_toggles
= set(('absolute', 'regress', 'dark', 'by_hosts'))
763 self
.toggles
= set(tog
for tog
in all_toggles
if tog
in self
.cgiform
)
764 platforms
= self
.cgiform
.getvalue('platforms', '')
766 self
.toggles
.add('by_hosts')
767 self
.passthru
= list(self
.toggles
)
768 self
.relative
= 'absolute' not in self
.toggles
770 self
.passthru
.append('platforms=%s' % platforms
)
771 self
.platforms_filter
= platforms
.split(',')
773 self
.platforms_filter
= []
774 self
.test_attributes
= perf
.parse_test_attr_args(self
.cgiform
)
775 perf
.append_cgi_args(self
.passthru
, self
.test_attributes
)
776 test
= self
.cgiform
.getvalue('test', '')
777 if 'antag' in self
.cgiform
:
778 antagonists
= ','.join(self
.cgiform
.getlist('antag'))
780 # or antag=test1,test2,test3,...
781 # or antag=test1&antag=test2&...
782 # testN is empty for solo case of no antagonist
783 self
.passthru
.append('antag=%s' % antagonists
)
785 antagonists
= None # not same as ''
786 return test
, antagonists