# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
use CGI::Carp qw(fatalsToBrowser);

# Package globals shared with the helper subs below:
#   $dbh    - DBI handle for the live CSV db
#   $arc    - DBI handle for the archive CSV db
#   $dbroot - filesystem root of the db directories
use vars qw($dbh $arc $dbroot); # current db, and db/archive

use constant STALE_AGE => 5 * 60; # seconds

# show a chart of this run; turned off in automated tests, and where
# an installation hasn't set up the required modules and libraries
use constant SHOW_CHART => 0;
sub createArchiveMetaTable {
    # Create the archive copy of tMetaTable under $dbroot/archive the
    # first time a record is archived.  No-op if the table file already
    # exists.  Uses the package globals $arc (archive DBI handle) and
    # $dbroot.  Returns nothing.
    my $table = "tMetaTable";
    return if -e "$dbroot/archive/$table"; # don't create it if it exists
    warn "createMetaTable:\t$dbroot/archive/$table";
    mkdir "$dbroot/archive" unless -d "$dbroot/archive";
    # NOTE(review): surrounding lines were lost in this paste; the CREATE
    # TABLE text below is reassembled from the visible column list.
    my $sql = qq{
        CREATE TABLE tMetaTable
        (DATETIME CHAR(14), LASTPING CHAR(14),
         ID CHAR(8), INDEX INTEGER,
         CUR_IDX INTEGER, CUR_CYC INTEGER,
         CUR_CONTENT CHAR(128), STATE INTEGER,
         BLESSED INTEGER, MAXCYC INTEGER,
         MAXIDX INTEGER, REPLACE INTEGER,
         NOCACHE INTEGER, DELAY INTEGER,
         REMOTE_USER CHAR(16), HTTP_USER_AGENT CHAR(128),
         REMOTE_ADDR CHAR(15), USER_EMAIL CHAR(32),
         USER_COMMENT CHAR(256))
    };
    my $sth = $arc->prepare($sql);
    $sth->execute();
    $sth->finish();
    warn 'created archive meta table';
    return;
}
sub purgeStaleEntries {
    # Housekeeping for the live meta table (via global $dbh):
    #  1) delete INIT/OPEN runs that have not pinged in STALE_AGE seconds
    #     (they are never coming back), dropping their data tables too;
    #  2) move remaining records older than STALE_AGE to the archive.
    # Returns a newline-separated status summary of the deleted rows.
    my $now       = time();
    my $status    = '';
    my $metatable = "tMetaTable";

    # first, remove dead stuff
    my $sql = qq{SELECT * FROM $metatable
                 WHERE STATE = "INIT" OR STATE = "OPEN"};
    my $sth = $dbh->prepare($sql);
    $sth->execute();
    while (my @data = $sth->fetchrow_array()) {
        # $data[1] is LASTPING (YYYYMMDDHHMMSS); $data[2] is ID
        my $age = $now - timestamp2Time($data[1]);
        # if OPEN or INIT, and not heard from in 10 minutes, then it's never coming
        # back here again. Delete the entry. Whine in the error_log.
        if ($age > STALE_AGE) {
            warn "deleting stale record+table, id = $data[2], last = $data[1], @data";
            $dbh->do( qq(DELETE FROM $metatable WHERE ID = "$data[2]") );
            $dbh->do("DROP TABLE t" . $data[2]);
            $status .= "$age @data\n";
        }
    }
    $sth->finish();

    # now move any COMPLETE records to archive.  (Stale INIT/OPEN rows were
    # just removed above, so remaining rows older than STALE_AGE are the
    # COMPLETE ones.)
    $sql = qq{SELECT * FROM $metatable};
    $sth = $dbh->prepare($sql);
    $sth->execute();
    while (my @data = $sth->fetchrow_array()) {
        my $age = $now - timestamp2Time($data[1]);
        # This keeps the "live" entries from growing too slow.
        # If COMPLETE and older than 10 minutes, move to archive.
        if ($age > STALE_AGE) {
            warn "moving COMPLETE record+table, id = $data[2], last = $data[1], @data";
            moveRecordToArchive($data[2], \@data);
            $dbh->do( qq(DELETE FROM $metatable WHERE ID = "$data[2]") );
        }
    }
    $sth->finish();
    return $status;
}
# NOTE(review): the lines below are the tail of a sub whose declaration is
# not visible in this chunk (the paste has gaps); $id, $metatable, $sql,
# $sth and $dbh come from that enclosing scope.  The $sth->execute() call
# and the closing braces also appear to have been lost in the gaps —
# confirm against the original file before relying on this fragment.
92 # Don't move it if showing a chart. (Otherwise, if showing a
93 # a chart, I'd have to do a little extra work to make sure I
94 # didn't yank the record away from the IMG request)
95 $sql = qq{SELECT * FROM $metatable WHERE ID = "$id"};
96 $sth = $dbh->prepare($sql);
98 while (my @data = $sth->fetchrow_array()) {
# Archive the row ($data[2] is the ID column) and remove it from the live table.
99 warn "moving COMPLETE record+table, id = $id, @data\n";
100 moveRecordToArchive($data[2], \@data);
101 $dbh->do( qq(DELETE FROM $metatable WHERE ID = "$data[2]") );
sub moveRecordToArchive {
    # Move one run to the archive: append its meta row (arrayref $dataref)
    # to the archive meta table, then move its data table file t<id> from
    # $dbroot into $dbroot/archive.  Dies on missing arguments.
    my $id      = shift || die "no id";
    my $dataref = shift || die "no dataref";
    createArchiveMetaTable();        # if it doesn't exist
    insertIntoMetaTable($dataref);
    # File::Copy::move returns false on failure; don't lose the row silently.
    File::Copy::move("$dbroot/t$id", "$dbroot/archive/t$id")
        or warn "failed to move $dbroot/t$id to archive: $!";
    return;
}
sub insertIntoMetaTable {
    # Append one archived run's metadata to the archive tMetaTable (via
    # global $arc).  $dataref is an arrayref of the 19 column values in
    # table order (same order as the live table's rows).
    my $dataref = shift || die "no dataref";
    my $table = "tMetaTable";
    # NOTE(review): the INSERT skeleton was reassembled from the visible
    # column list; 19 columns, so 19 placeholders.
    my $sql = qq{
        INSERT INTO $table
        (DATETIME, LASTPING, ID,
         INDEX, CUR_IDX, CUR_CYC,
         CUR_CONTENT, STATE, BLESSED,
         MAXCYC, MAXIDX, REPLACE,
         NOCACHE, DELAY, REMOTE_USER,
         HTTP_USER_AGENT, REMOTE_ADDR, USER_EMAIL,
         USER_COMMENT)
        VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)
    };
    my $sth = $arc->prepare($sql);
    $sth->execute(@$dataref);
    $sth->finish();
    return;
}
sub timestamp2Time ($) {
    # Convert a "YYYYMMDDHHMMSS" timestamp string into epoch seconds
    # (local time).  The visible body used $str without unpacking it
    # from @_ — restore the argument fetch.
    my $str = shift;
    # unpack gives (year,mon,day,hour,min,sec); reversed that is the
    # (sec,min,hour,mday,mon,year) order Time::Local::timelocal expects
    my @datetime = reverse unpack 'A4A2A2A2A2A2', $str;
    --$datetime[4]; # month: 0-11
    return Time::Local::timelocal(@datetime);
}
sub serializeDataSet {
    # package up this data for storage elsewhere: a flat
    # "key|value|..." summary, then ':' and the per-URL rows with
    # newlines folded to '|'.  $rs is a URLTimingDataSet (hash-based,
    # with an as_string method).
    my $rs = shift || die "no dataset";
    my $data = "avgmedian|" . $rs->{avgmedian};
    $data .= "|average|" . $rs->{average};
    $data .= "|minimum|" . $rs->{minimum};
    $data .= "|maximum|" . $rs->{maximum};
    # localize $_ so the substitution can't clobber the caller's copy
    # NOTE(review): the paste lost the line that set $_; as_string() is
    # the likely source — confirm against the original file.
    local $_ = $rs->as_string();
    s/\s*\n/\|/gs; # fold newlines
    return $data . ":" . $_;
}
# Pull the requested run out of the db and emit an HTML page whose inline
# script dump()s tinderbox-parsable statistics.
my $request = CGI::Request->new();   # was indirect 'new CGI::Request'
my $id = $request->param('id'); #XXX need to check for valid parameter id
my $rs = URLTimingDataSet->new($id);

print "Content-type: text/html\n\n";

# This sucks: we'll let the test time out to avoid crash-on-shutdown bugs
print "<html><body onload='window.close();'>";

# dump some stats for tinderbox to snarf
print "<script>";
print "if (window.dump) dump('";
print "Starting Page Load Test\\n\\\n";
print "Test id: $id\\n\\\n";
print "Avg. Median : ", $rs->{avgmedian}, " msec\\n\\\n";
print "Average : ", $rs->{average}, " msec\\n\\\n";
print "Minimum : ", $rs->{minimum}, " msec\\n\\\n";
print "Maximum : ", $rs->{maximum}, " msec\\n\\\n";
print "IDX PATH AVG MED MAX MIN TIMES ...\\n\\\n";
if ($request->param('sort')) {
    $_ = $rs->as_string_sorted();
}
else {
    $_ = $rs->as_string();
}
# Terminate raw newlines with '\n\' so we don't have an unterminated string literal.
s/\n/\\n\\\n/g;
print;
print "(tinderbox dropping follows)\\n\\\n";
print "_x_x_mozilla_page_load," , $rs->{avgmedian}, ",", $rs->{maximum}, ",", $rs->{minimum}, "\\n\\\n";

# package up this data for storage elsewhere
my $data = serializeDataSet($rs);
print "_x_x_mozilla_page_load_details,", $data, "\\n\\\n";

#print "TinderboxPrint:<a title=\"Avg. of the median per url pageload time.\" href=\"http://tegu.mozilla.org/graph/query.cgi?tbox=spider&testname=pageload&autoscale=1&days=7&avg=1\">Tp:", $rs->{avgmedian}, "ms</a>", "\\n\\\n";

# close the dump('...') string literal opened above, then the script element
print "');";
print "</script></body></html>\n";
# If this is SurfingSafari, then catch a wave and you're sitting on top of the world!!
# (and also blat this out to tegu, cause we got no 'dump' statement.
if ($request->cgi->var("HTTP_USER_AGENT") =~ /Safari/) {
    # map of known Safari test boxes (by IP) to their tinderbox names
    my %machineMap = (
        "10.169.105.26" => "boxset",
        "10.169.105.21" => "pawn"
    );
    my $ip = $request->cgi->var('REMOTE_ADDR');
    my $machine = $machineMap{$ip};
    # only submit results for machines we recognize
    if ($machine) {
        use LWP::UserAgent;
        use HTTP::Request::Common qw(POST);
        my $ua = LWP::UserAgent->new;
        $ua->timeout(10); # seconds
        my $req = POST('http://tegu.mozilla.org/graph/collect.cgi',
                       [testname => 'pageload',
                        tbox     => "$machine" . "-aux",
                        value    => $rs->{avgmedian}]);
        # eval-wrap so a dead collector can't kill the whole report;
        # the original warned with $@, implying an eval here
        my $res = eval { $ua->request($req) };
        if ($@ || !defined $res) {
            warn "Failed to submit startup results: $@";
        }
        else {
            warn "Startup results submitted to server: \n",
                 $res->status_line, "\n", $res->content, "\n";
        }
    }
}
if ($request->param('purge')) {
    # now move any old stuff into archive and clean stale entries
    # just going with the simple approach of "whoever sees old entries
    # first, cleans em up, whether they 'own' them or not". Hopefully,
    # the default locking will be sufficient to prevent a race.
    # NOTE(review): $dbroot is a package global assumed to be assigned in
    # lines not visible in this chunk — confirm.
    #
    # Fix: DBI->connect() takes ($dsn, $user, $pass, \%attr); the attribute
    # hashref was being passed in the username slot, so RaiseError and
    # AutoCommit were silently ignored.
    $dbh = DBI->connect("DBI:CSV:f_dir=./$dbroot", undef, undef,
                        {RaiseError => 1, AutoCommit => 1})
        || die "Cannot connect: " . $DBI::errstr;
    $arc = DBI->connect("DBI:CSV:f_dir=./$dbroot/archive", undef, undef,
                        {RaiseError => 1, AutoCommit => 1})
        || die "Cannot connect: " . $DBI::errstr;
    purgeStaleEntries($id);
    # release the CSV handles once housekeeping is done
    $dbh->disconnect();
    $arc->disconnect();
}