package Test::Harness;

use 5.005_64;
use Exporter;
use Benchmark;
use Config;
use FileHandle;
use strict;

our($VERSION, $verbose, $switches, $have_devel_corestack, $curtest,
    $columns, @ISA, @EXPORT, @EXPORT_OK);
$have_devel_corestack = 0;

$VERSION = "1.1604";

$ENV{HARNESS_ACTIVE} = 1;

# Some experimental versions of OS/2 build have broken $?
my $ignore_exitcode = $ENV{HARNESS_IGNORE_EXITCODE};

my $files_in_dir = $ENV{HARNESS_FILELEAK_IN_DIR};

my $tests_skipped = 0;
my $subtests_skipped = 0;

@ISA = ('Exporter');
@EXPORT = qw(&runtests);
@EXPORT_OK = qw($verbose $switches);

$verbose = 0;
$switches = "-w";
$columns = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;

sub globdir { opendir DIRH, shift; my @f = readdir DIRH; closedir DIRH; @f }

sub runtests {
    my(@tests) = @_;
    local($|) = 1;
    my($test,$te,$ok,$next,$max,$pct,$totbonus,@failed,%failedtests);
    my $totmax = 0;
    my $totok  = 0;
    my $files  = 0;
    my $bad    = 0;
    my $good   = 0;
    my $total  = @tests;

    # pass -I flags to children
    my $old5lib = $ENV{PERL5LIB};

    # VMS has a 255-byte limit on the length of %ENV entries, so
    # toss the ones that involve perl_root, the install location
    # for VMS
    my $new5lib;
    if ($^O eq 'VMS') {
        $new5lib = join($Config{path_sep}, grep {!/perl_root/i;} @INC);
        $switches =~ s/-(\S*[A-Z]\S*)/"-$1"/g;
    }
    else {
        $new5lib = join($Config{path_sep}, @INC);
    }

    local($ENV{'PERL5LIB'}) = $new5lib;

    my @dir_files = globdir $files_in_dir if defined $files_in_dir;
    my $t_start = new Benchmark;
    while ($test = shift(@tests)) {
        $te = $test;
        chop($te);
        if ($^O eq 'VMS') { $te =~ s/^.*\.t\./[.t./s; }
        my $blank = (' ' x 77);
        my $leader = "$te" . '.' x (20 - length($te));
        my $ml = "";
        $ml = "\r$blank\r$leader"
            if -t STDOUT and not $ENV{HARNESS_NOTTY} and not $verbose;
        print $leader;
        my $fh = new FileHandle;
        $fh->open($test) or print "can't open $test. $!\n";
        my $first = <$fh>;
        my $s = $switches;
        $s .= " $ENV{'HARNESS_PERL_SWITCHES'}"
            if exists $ENV{'HARNESS_PERL_SWITCHES'};
        $s .= join " ", q[ "-T"], map {qq["-I$_"]} @INC
            if $first =~ /^#!.*\bperl.*-\w*T/;
        $fh->close or print "can't close $test. $!\n";
        my $cmd = ($ENV{'HARNESS_COMPILE_TEST'})
            ? "./perl -I../lib ../utils/perlcc $test "
              . "-r 2>> ./compilelog |"
            : "$^X $s $test|";
        $cmd = "MCR $cmd" if $^O eq 'VMS';
        $fh->open($cmd) or print "can't run $test. $!\n";
        $ok = $next = $max = 0;
        @failed = ();
        my %todo = ();
        my $bonus = 0;
        my $skipped = 0;
        my $skip_reason;
        while (<$fh>) {
            if( $verbose ){
                print $_;
            }
            if (/^1\.\.([0-9]+) todo([\d\s]+)\;/) {
                $max = $1;
                for (split(/\s+/, $2)) { $todo{$_} = 1; }
                $totmax += $max;
                $files++;
                $next = 1;
            } elsif (/^1\.\.([0-9]+)(\s*\#\s*[Ss]kip\S*(?>\s+)(.+))?/) {
                $max = $1;
                $totmax += $max;
                $files++;
                $next = 1;
                $skip_reason = $3 if not $max and defined $3;
            } elsif ($max && /^(not\s+)?ok\b/) {
                my $this = $next;
                if (/^not ok\s*(\d*)/){
                    $this = $1 if $1 > 0;
                    print "${ml}NOK $this" if $ml;
                    if (!$todo{$this}) {
                        push @failed, $this;
                    } else {
                        $ok++;
                        $totok++;
                    }
                } elsif (/^ok\s*(\d*)(\s*\#\s*[Ss]kip\S*(?:(?>\s+)(.+))?)?/) {
                    $this = $1 if $1 > 0;
                    print "${ml}ok $this/$max" if $ml;
                    $ok++;
                    $totok++;
                    $skipped++ if defined $2;
                    my $reason;
                    $reason = 'unknown reason' if defined $2;
                    $reason = $3 if defined $3;
                    if (defined $reason and defined $skip_reason) {
                        # print "was: '$skip_reason' new '$reason'\n";
                        $skip_reason = 'various reasons'
                            if $skip_reason ne $reason;
                    } elsif (defined $reason) {
                        $skip_reason = $reason;
                    }
                    $bonus++, $totbonus++ if $todo{$this};
                }
                if ($this > $next) {
                    # print "Test output counter mismatch [test $this]\n";
                    # no need to warn probably
                    push @failed, $next..$this-1;
                } elsif ($this < $next) {
                    # we have seen more "ok" lines than the number suggests
                    print "Confused test output: test $this answered after test ", $next-1, "\n";
                    $next = $this;
                }
                $next = $this + 1;
            }
        }
        $fh->close; # must close to reap child resource values
        my $wstatus = $ignore_exitcode ? 0 : $?;    # Can trust $? ?
        my $estatus;
        $estatus = ($^O eq 'VMS'
                        ? eval 'use vmsish "status"; $estatus = $?'
                        : $wstatus >> 8);
        if ($wstatus) {
            my ($failed, $canon, $percent) = ('??', '??');
            printf "${ml}dubious\n\tTest returned status $estatus (wstat %d, 0x%x)\n",
                    $wstatus,$wstatus;
            print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';
            if (corestatus($wstatus)) { # until we have a wait module
                if ($have_devel_corestack) {
                    Devel::CoreStack::stack($^X);
                } else {
                    print "\ttest program seems to have generated a core\n";
                }
            }
            $bad++;
            if ($max) {
                if ($next == $max + 1 and not @failed) {
                    print "\tafter all the subtests completed successfully\n";
                    $percent = 0;
                    $failed = 0;    # But we do not set $canon!
                } else {
                    push @failed, $next..$max;
                    $failed = @failed;
                    (my $txt, $canon) = canonfailed($max,$skipped,@failed);
                    $percent = 100*(scalar @failed)/$max;
                    print "DIED. ",$txt;
                }
            }
            $failedtests{$test} = { canon => $canon, max => $max || '??',
                                    failed => $failed,
                                    name => $test, percent => $percent,
                                    estat => $estatus, wstat => $wstatus,
                                  };
        } elsif ($ok == $max && $next == $max+1) {
            if ($max and $skipped + $bonus) {
                my @msg;
                push(@msg, "$skipped/$max skipped: $skip_reason")
                    if $skipped;
                push(@msg, "$bonus/$max unexpectedly succeeded")
                    if $bonus;
                print "${ml}ok, ".join(', ', @msg)."\n";
            } elsif ($max) {
                print "${ml}ok\n";
            } elsif (defined $skip_reason) {
                print "skipped: $skip_reason\n";
                $tests_skipped++;
            } else {
                print "skipped test on this platform\n";
                $tests_skipped++;
            }
            $good++;
        } elsif ($max) {
            if ($next <= $max) {
                push @failed, $next..$max;
            }
            if (@failed) {
                my ($txt, $canon) = canonfailed($max,$skipped,@failed);
                print "${ml}$txt";
                $failedtests{$test} = { canon => $canon, max => $max,
                                        failed => scalar @failed,
                                        name => $test, percent => 100*(scalar @failed)/$max,
                                        estat => '', wstat => '',
                                      };
            } else {
                print "Don't know which tests failed: got $ok ok, expected $max\n";
                $failedtests{$test} = { canon => '??', max => $max,
                                        failed => '??',
                                        name => $test, percent => undef,
                                        estat => '', wstat => '',
                                      };
            }
            $bad++;
        } elsif ($next == 0) {
            print "FAILED before any test output arrived\n";
            $bad++;
            $failedtests{$test} = { canon => '??', max => '??',
                                    failed => '??',
                                    name => $test, percent => undef,
                                    estat => '', wstat => '',
                                  };
        }
        $subtests_skipped += $skipped;
        if (defined $files_in_dir) {
            my @new_dir_files = globdir $files_in_dir;
            if (@new_dir_files != @dir_files) {
                my %f;
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
            }
        }
    }
    my $t_total = timediff(new Benchmark, $t_start);

    if ($^O eq 'VMS') {
        if (defined $old5lib) {
            $ENV{PERL5LIB} = $old5lib;
        } else {
            delete $ENV{PERL5LIB};
        }
    }
    my $bonusmsg = '';
    $bonusmsg = (" ($totbonus subtest".($totbonus>1?'s':'').
                 " UNEXPECTEDLY SUCCEEDED)")
        if $totbonus;
    if ($tests_skipped) {
        $bonusmsg .= ", $tests_skipped test" . ($tests_skipped != 1 ? 's' : '');
        if ($subtests_skipped) {
            $bonusmsg .= " and $subtests_skipped subtest"
                         . ($subtests_skipped != 1 ? 's' : '');
        }
        $bonusmsg .= ' skipped';
    }
    elsif ($subtests_skipped) {
        $bonusmsg .= ", $subtests_skipped subtest"
                     . ($subtests_skipped != 1 ? 's' : '')
                     . " skipped";
    }
    if ($bad == 0 && $totmax) {
        print "All tests successful$bonusmsg.\n";
    } elsif ($total==0){
        die "FAILED--no tests were run for some reason.\n";
    } elsif ($totmax==0) {
        my $blurb = $total==1 ? "script" : "scripts";
        die "FAILED--$total test $blurb could be run, alas--no output ever seen\n";
    } else {
        $pct = sprintf("%.2f", $good / $total * 100);
        my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
                              $totmax - $totok, $totmax, 100*$totok/$totmax;
        # Create formats
        # First, figure out max length of test names
        my $failed_str = "Failed Test";
        my $middle_str = " Status Wstat Total Fail Failed ";
        my $list_str = "List of Failed";
        my $max_namelen = length($failed_str);
        my $script;
        foreach $script (keys %failedtests) {
            $max_namelen =
                (length $failedtests{$script}->{name} > $max_namelen) ?
                 length $failedtests{$script}->{name} : $max_namelen;
        }
        my $list_len = $columns - length($middle_str) - $max_namelen;
        if ($list_len < length($list_str)) {
            $list_len = length($list_str);
            $max_namelen = $columns - length($middle_str) - $list_len;
            if ($max_namelen < length($failed_str)) {
                $max_namelen = length($failed_str);
                $columns = $max_namelen + length($middle_str) + $list_len;
            }
        }

        my $fmt_top = "format STDOUT_TOP =\n"
                      . sprintf("%-${max_namelen}s", $failed_str)
                      . $middle_str
                      . $list_str . "\n"
                      . "-" x $columns
                      . "\n.\n";
        my $fmt = "format STDOUT =\n"
                  . "@" . "<" x ($max_namelen - 1)
                  . " @>> @>>>> @>>>> @>>> ^##.##% "
                  . "^" . "<" x ($list_len - 1) . "\n"
                  . '{ $curtest->{name}, $curtest->{estat},'
                  . ' $curtest->{wstat}, $curtest->{max},'
                  . ' $curtest->{failed}, $curtest->{percent},'
                  . ' $curtest->{canon}'
                  . "\n}\n"
                  . "~~" . " " x ($columns - $list_len - 2) . "^"
                  . "<" x ($list_len - 1) . "\n"
                  . '$curtest->{canon}'
                  . "\n.\n";

        eval $fmt_top;
        die $@ if $@;
        eval $fmt;
        die $@ if $@;

        # Now write to formats
        for $script (sort keys %failedtests) {
            $curtest = $failedtests{$script};
            write;
        }
        if ($bad) {
            $bonusmsg =~ s/^,\s*//;
            print "$bonusmsg.\n" if $bonusmsg;
            die "Failed $bad/$total test scripts, $pct% okay.$subpct\n";
        }
    }
    printf("Files=%d, Tests=%d, %s\n", $files, $totmax, timestr($t_total, 'nop'));

    return ($bad == 0 && $totmax);
}

my $tried_devel_corestack;
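
# corestatus() inspects the raw wait status of a finished test to decide
# whether it appears to have dumped core: it prefers WCOREDUMP from wait.ph
# when that can be loaded and otherwise falls back to testing the 0200 bit.
# It also tries (once) to load Devel::CoreStack for a stack dump of the core.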
sub corestatus {
    my($st) = @_;

    eval { require 'wait.ph' };
    my $ret = defined &WCOREDUMP ? WCOREDUMP($st) : $st & 0200;

    eval { require Devel::CoreStack; $have_devel_corestack++ }
        unless $tried_devel_corestack++;

    $ret;
}
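
# canonfailed() takes the subtest count, the number of skipped subtests and
# the list of failed subtest numbers, collapses consecutive failures into
# ranges (e.g. "3-5, 12"), and returns the human-readable failure summary
# together with that canonical list for the final report.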
sub canonfailed ($@) {
    my($max,$skipped,@failed) = @_;
    my %seen;
    @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
    my $failed = @failed;
    my @result = ();
    my @canon = ();
    my $min;
    my $last = $min = shift @failed;
    my $canon;
    if (@failed) {
        for (@failed, $failed[-1]) { # don't forget the last one
            if ($_ > $last+1 || $_ == $last) {
                if ($min == $last) {
                    push @canon, $last;
                } else {
                    push @canon, "$min-$last";
                }
                $min = $_;
            }
            $last = $_;
        }
        local $" = ", ";
        push @result, "FAILED tests @canon\n";
        $canon = "@canon";
    } else {
        push @result, "FAILED test $last\n";
        $canon = $last;
    }

    push @result, "\tFailed $failed/$max tests, ";
    push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
    my $ender = 's' x ($skipped > 1);
    my $good = $max - $failed - $skipped;
    my $goodper = sprintf("%.2f",100*($good/$max));
    push @result, " (-$skipped skipped test$ender: $good okay, $goodper%)" if $skipped;
    push @result, "\n";
    my $txt = join "", @result;
    ($txt, $canon);
}

1;
__END__

=head1 NAME

Test::Harness - run perl standard test scripts with statistics

=head1 SYNOPSIS

    use Test::Harness;

    runtests(@tests);

=head1 DESCRIPTION

(By using the L<Test> module, you can write test scripts without
knowing the exact output this module expects.  However, if you need to
know the specifics, read on!)

Perl test scripts print to standard output C<"ok N"> for each single
test, where C<N> is an increasing sequence of integers.  The first line
output by a standard test script is C<"1..M">, with C<M> being the
number of tests that should be run within the test script.
Test::Harness::runtests(@tests) runs all the test scripts named as
arguments and checks standard output for the expected C<"ok N"> strings.
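
For example, a minimal conforming test script (the file name used below
is only illustrative) could look like this:

    # t/basic.t -- hypothetical example
    print "1..2\n";                               # plan: two tests follow
    print "ok 1\n";                               # test 1 passed
    print 1 + 1 == 2 ? "ok 2\n" : "not ok 2\n";   # test 2

and would be run from a driver script with:

    use Test::Harness;
    runtests('t/basic.t');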

After all tests have been performed, runtests() prints some
performance statistics that are computed by the Benchmark module.

=head2 The test script output

Any output from the test script to standard error is ignored and
bypassed, and will thus be seen by the user.  Lines written to standard
output containing C</^(not\s+)?ok\b/> are interpreted as feedback for
runtests().  All other lines are discarded.

The test numbers after C<ok> may be omitted.  In that case Test::Harness
temporarily maintains its own counter until the script supplies test
numbers again.  So the following test script

    print <<END;
    1..6
    not ok
    ok
    not ok
    ok
    ok
    END

will generate

    FAILED tests 1, 3, 6
    Failed 3/6 tests, 50.00% okay

The global variable $Test::Harness::verbose is exportable and can be
used to let runtests() display the standard output of the script
without otherwise altering its behavior.

The global variable $Test::Harness::switches is exportable and can be
used to set perl command line options used for running the test
script(s).  The default value is C<-w>.
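
For instance, a driver script might use both variables like this (the
test file names and the extra C<-Ilib> switch are made up):

    use Test::Harness qw(runtests $verbose $switches);

    $verbose  = 1;              # echo each test script's output
    $switches = "-w -Ilib";     # perl switches used for every test script
    runtests('t/basic.t', 't/more.t');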

If the standard output line contains the substring C< # Skip> (with
variations in spacing and case) after C<ok> or C<ok NUMBER>, it is
counted as a skipped test.  If the whole test script succeeds, the
count of skipped tests is included in the generated output.
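
For example, a line such as the following (the reason text is arbitrary)
is counted as a skipped subtest:

    ok 13 # Skip no Unix sockets on this platform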

C<Test::Harness> reports the text after C< # Skip(whatever)> as a
reason for skipping.  One can also include such an explanation in a
C<1..0> line emitted if the test is skipped completely:

    1..0 # Skipped: no leverage found

=head1 EXPORT

C<&runtests> is exported by Test::Harness by default.

=head1 DIAGNOSTICS

=over 4

=item C<All tests successful.\nFiles=%d, Tests=%d, %s>

If all tests are successful, some statistics about the performance are
printed.

=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>

For any single script that has failing subtests, statistics like the
above are printed.

=item C<Test returned status %d (wstat %d)>

For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
and C<$?> are printed in a message similar to the above.

=item C<Failed 1 test, %.2f%% okay. %s>

=item C<Failed %d/%d tests, %.2f%% okay. %s>

If not all tests were successful, the script dies with one of the
above messages.

=back

=head1 ENVIRONMENT

Setting C<HARNESS_IGNORE_EXITCODE> makes harness ignore the exit status
of child processes.

Setting C<HARNESS_NOTTY> to a true value forces the harness to behave as
though STDOUT were not a console.  You may need to set this if you don't
want harness to update its progress messages in place using carriage
returns, since some consoles do not handle carriage returns properly
(which results in somewhat messy output).

Setting C<HARNESS_COMPILE_TEST> to a true value will make harness attempt
to compile the test using C<perlcc> before running it.

If C<HARNESS_FILELEAK_IN_DIR> is set to the name of a directory, harness
will check after each test whether new files appeared in that directory,
and report them as

    LEAKED FILES: scr.tmp 0 my.db

If the directory name is relative, it is interpreted with respect to the
current directory at the moment runtests() was called.  Putting an
absolute path into C<HARNESS_FILELEAK_IN_DIR> may give more predictable
results.
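
Because C<HARNESS_FILELEAK_IN_DIR> is read when the module is loaded, it
has to be set before C<use Test::Harness>; for example (the directory
name here is only an example):

    BEGIN { $ENV{HARNESS_FILELEAK_IN_DIR} = "t/tmp" }
    use Test::Harness;

    runtests(@tests);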

The value of C<HARNESS_PERL_SWITCHES> will be prepended to the
switches used to invoke perl on each test.  For example, setting
C<HARNESS_PERL_SWITCHES> to "-W" will run all tests with all
warnings enabled.
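
C<HARNESS_PERL_SWITCHES> is consulted for each test as it is run, so a
driver can simply set it before calling runtests(); a sketch:

    $ENV{HARNESS_PERL_SWITCHES} = "-W";   # enable all warnings in every test
    runtests(@tests);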

If C<HARNESS_COLUMNS> is set, then this value will be used for the
width of the terminal.  If it is not set then it will default to
C<COLUMNS>.  If this is not set, it will default to 80.  Note that users
of Bourne-sh based shells will need to C<export COLUMNS> for this
module to use that variable.

Harness sets C<HARNESS_ACTIVE> before executing the individual tests.
This allows the tests to determine if they are being executed through the
harness or by any other means.
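
A test script might use it, for instance, to suppress diagnostics that
are only useful when the test is run by hand (a sketch):

    # inside a test script
    unless ($ENV{HARNESS_ACTIVE}) {
        print "# running standalone, printing extra diagnostics\n";
    }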

=head1 SEE ALSO

L<Test> for writing test scripts and also L<Benchmark> for the
underlying timing routines.

=head1 AUTHORS

Either Tim Bunce or Andreas Koenig, we don't know.  What we know for
sure is that it was inspired by Larry Wall's TEST script that came
with perl distributions for ages.  Numerous anonymous contributors
exist.  The current maintainer is Andreas Koenig.

=head1 BUGS

Test::Harness uses $^X to determine the perl binary with which to run
the tests.  Test scripts running via the shebang (C<#!>) line may not be
portable because $^X is not consistent for shebang scripts across
platforms.  This is no problem when Test::Harness is run with an
absolute path to the perl binary or when $^X can be found in the path.

=cut