# -*- Mode: cperl; cperl-indent-level: 4 -*-

package Test::Harness;

require 5.00405;
use Test::Harness::Straps;
use Test::Harness::Assert;
use Exporter;
use Benchmark;
use Config;
use strict;

use vars qw(
    $VERSION
    @ISA @EXPORT @EXPORT_OK
    $Verbose $Switches $Debug
    $verbose $switches $debug
    $Curtest
    $Columns
    $Timer
    $ML $Last_ML_Print
    $Strap
    $has_time_hires
);

BEGIN {
    eval "use Time::HiRes 'time'";
    $has_time_hires = !$@;
}
=head1 NAME

Test::Harness - Run Perl standard test scripts with statistics

=head1 VERSION

Version 2.56

=cut

$VERSION = "2.56";

# Backwards compatibility for exportable variable names.
*verbose  = *Verbose;
*switches = *Switches;
*debug    = *Debug;

$ENV{HARNESS_ACTIVE}  = 1;
$ENV{HARNESS_VERSION} = $VERSION;

END {
    # For VMS.
    delete $ENV{HARNESS_ACTIVE};
    delete $ENV{HARNESS_VERSION};
}

# Some experimental versions of OS/2 build have broken $?
my $Ignore_Exitcode = $ENV{HARNESS_IGNORE_EXITCODE};

my $Files_In_Dir = $ENV{HARNESS_FILELEAK_IN_DIR};

$Strap = Test::Harness::Straps->new;

sub strap { return $Strap };

@ISA       = ('Exporter');
@EXPORT    = qw(&runtests);
@EXPORT_OK = qw($verbose $switches);

$Verbose  = $ENV{HARNESS_VERBOSE} || 0;
$Debug    = $ENV{HARNESS_DEBUG} || 0;
$Switches = "-w";
$Columns  = $ENV{HARNESS_COLUMNS} || $ENV{COLUMNS} || 80;
$Columns--;                # Some shells have trouble with a full line of text.
$Timer    = $ENV{HARNESS_TIMER} || 0;
=head1 SYNOPSIS

  use Test::Harness;

  runtests(@test_files);

=head1 DESCRIPTION

B<STOP!> If all you want to do is write a test script, consider
using Test::Simple.  Test::Harness is the module that reads the
output from Test::Simple, Test::More and other modules based on
Test::Builder.  You don't need to know about Test::Harness to use
those modules.

Test::Harness runs tests and expects output from the test in a
certain format.  That format is called TAP, the Test Anything
Protocol.  It is defined in L<Test::Harness::TAP>.

C<Test::Harness::runtests(@tests)> runs all the test scripts named
as arguments and checks standard output for the expected strings
in TAP format.

The F<prove> utility is a thin wrapper around Test::Harness.

=head2 Taint mode

Test::Harness will honor the C<-T> or C<-t> in the #! line on your
test files.  So if you begin a test with:

    #!perl -T

the test will be run with taint mode on.
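For instance, a complete taint-mode test file might look like this
(a minimal sketch; the C<${^TAINT}> check requires perl 5.8 and is
only illustrative):

    #!perl -T
    # Test::Harness sees the -T above and reruns this file with taint on.
    use strict;
    print "1..1\n";
    # ${^TAINT} is true when the interpreter is running in taint mode.
    print ${^TAINT} ? "ok 1\n" : "not ok 1\n";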
=head2 Configuration variables.

These variables can be used to configure the behavior of
Test::Harness.  They are exported on request.  A sketch showing how
they might be set follows this list.

=over 4

=item C<$Test::Harness::Verbose>

The package variable C<$Test::Harness::Verbose> is exportable and can be
used to let C<runtests()> display the standard output of the script
without altering the behavior otherwise.  The F<prove> utility's C<-v>
flag will set this.

=item C<$Test::Harness::switches>

The package variable C<$Test::Harness::switches> is exportable and can be
used to set perl command line options used for running the test
script(s).  The default value is C<-w>.  It overrides C<HARNESS_SWITCHES>.

=item C<$Test::Harness::Timer>

If set to true, and C<Time::HiRes> is available, print elapsed seconds
after each test file.

=back
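A minimal sketch of setting these before a run (the values and the
F<t/*.t> glob are illustrative only; the lowercase aliases C<$verbose>
and C<$switches> work the same way):

  use Test::Harness;

  $Test::Harness::Verbose  = 1;           # echo each test's raw output
  $Test::Harness::Switches = "-w -Ilib";  # passed to perl for each test
  $Test::Harness::Timer    = 1;           # print per-file elapsed time

  runtests(glob 't/*.t');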
=head2 Failure

When tests fail, analyze the summary report:

  t/base..............ok
  t/nonumbers.........ok
  t/ok................ok
  t/test-harness......ok
  t/waterloo..........dubious
          Test returned status 3 (wstat 768, 0x300)
  DIED. FAILED tests 1, 3, 5, 7, 9, 11, 13, 15, 17, 19
          Failed 10/20 tests, 50.00% okay
  Failed Test  Stat Wstat Total Fail  Failed  List of Failed
  -----------------------------------------------------------------------
  t/waterloo.t    3   768    20   10  50.00%  1 3 5 7 9 11 13 15 17 19
  Failed 1/5 test scripts, 80.00% okay. 10/44 subtests failed, 77.27% okay.

Everything passed but F<t/waterloo.t>.  It failed 10 of 20 tests and
exited with non-zero status indicating something dubious happened.

The columns in the summary report mean:

=over 4

=item B<Failed Test>

The test file which failed.

=item B<Stat>

If the test exited with non-zero, this is its exit status.

=item B<Wstat>

The wait status of the test.

=item B<Total>

Total number of tests expected to run.

=item B<Fail>

Number which failed, either from "not ok" or because they never ran.

=item B<Failed>

Percentage of the total tests which failed.

=item B<List of Failed>

A list of the tests which failed.  Successive failures may be
abbreviated (i.e., 15-20 to indicate that tests 15, 16, 17, 18, 19 and
20 failed).

=back
=head2 Functions

Test::Harness currently has only one function; here it is.

=over 4

=item B<runtests>

  my $allok = runtests(@test_files);

This runs all the given I<@test_files> and divines whether they passed
or failed based on their output to STDOUT (details above).  It prints
out each individual test which failed along with a summary report and
how long it all took.

It returns true if everything was ok.  Otherwise it will C<die()> with
one of the messages in the DIAGNOSTICS section.
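Since failures arrive as a C<die()>, a caller that needs to keep going
can trap them with C<eval>; a minimal sketch (the F<t/*.t> glob is
illustrative):

  my @test_files = glob 't/*.t';
  my $allok = eval { runtests(@test_files) };
  if ($@) {
      # $@ holds one of the DIAGNOSTICS messages, e.g. "FAILED--..."
      warn "Test run aborted: $@";
  }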
=cut
sub runtests {
    my(@tests) = @_;

    local ($\, $,);

    my($tot, $failedtests) = _run_all_tests(@tests);
    _show_results($tot, $failedtests);

    my $ok = _all_ok($tot);

    assert(($ok xor keys %$failedtests),
           q{ok status jives with $failedtests});

    return $ok;
}
=begin _private

=item B<_all_ok>

  my $ok = _all_ok(\%tot);

Tells you if this test run is overall successful or not.

=cut

sub _all_ok {
    my($tot) = shift;

    return $tot->{bad} == 0 && ($tot->{max} || $tot->{skipped}) ? 1 : 0;
}
=item B<_globdir>

  my @files = _globdir $dir;

Returns all the files in a directory.  This is shorthand for backwards
compatibility on systems where C<glob()> doesn't work right.

=cut

sub _globdir {
    opendir DIRH, shift;
    my @f = readdir DIRH;
    closedir DIRH;

    return @f;
}
=item B<_run_all_tests>

  my($total, $failed) = _run_all_tests(@test_files);

Runs all the given C<@test_files> (as C<runtests()>) but does it
quietly (no report).  $total is a hash ref summary of all the tests
run.  Its keys and values are as follows:

    bonus           Number of individual todo tests unexpectedly passed
    max             Number of individual tests run
    ok              Number of individual tests passed
    sub_skipped     Number of individual tests skipped
    todo            Number of individual todo tests

    files           Number of test files run
    good            Number of test files passed
    bad             Number of test files failed
    tests           Number of test files originally given
    skipped         Number of test files skipped

If C<< $total->{bad} == 0 >> and C<< $total->{max} > 0 >>, you've
got a successful test.

$failed is a hash ref of all the test scripts which failed.  Each key
is the name of a test script, each value is another hash representing
how that script failed.  Its keys are these:

    name        Name of the test which failed
    estat       Script's exit value
    wstat       Script's wait status
    max         Number of individual tests
    failed      Number which failed
    percent     Percentage of tests which failed
    canon       List of tests which failed (as string).

C<$failed> should be empty if everything passed.

B<NOTE> Currently this function is still noisy.  I'm working on it.
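A caller could walk the two hash refs like this (a hedged sketch;
C<_run_all_tests()> is private, so its interface may change):

  my($total, $failed) = Test::Harness::_run_all_tests(@test_files);
  printf "%d files run, %d bad\n", $total->{files}, $total->{bad};
  for my $script (sort keys %$failed) {
      print "$script failed: $failed->{$script}{canon}\n";
  }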
=cut
# Turns on autoflush for the handle passed
sub _autoflush {
    my $flushy_fh = shift;
    my $old_fh = select $flushy_fh;
    $| = 1;
    select $old_fh;
}
sub _run_all_tests {
    my @tests = @_;

    _autoflush(\*STDOUT);
    _autoflush(\*STDERR);

    my(%failedtests);

    # Test-wide totals.
    my(%tot) = (
        bonus       => 0,
        max         => 0,
        ok          => 0,
        files       => 0,
        bad         => 0,
        good        => 0,
        tests       => scalar @tests,
        sub_skipped => 0,
        todo        => 0,
        skipped     => 0,
        bench       => 0,
    );

    my @dir_files = _globdir $Files_In_Dir if defined $Files_In_Dir;
    my $run_start_time = new Benchmark;

    my $width = _leader_width(@tests);
    foreach my $tfile (@tests) {
        $Last_ML_Print = 0;  # so each test prints at least once
        my($leader, $ml) = _mk_leader($tfile, $width);
        local $ML = $ml;

        print $leader;

        $tot{files}++;

        $Strap->{_seen_header} = 0;
        if ( $Test::Harness::Debug ) {
            print "# Running: ", $Strap->_command_line($tfile), "\n";
        }
        my $test_start_time = $Timer ? time : 0;
        my %results = $Strap->analyze_file($tfile) or
          do { warn $Strap->{error}, "\n";  next };
        my $elapsed;
        if ( $Timer ) {
            $elapsed = time - $test_start_time;
            if ( $has_time_hires ) {
                $elapsed = sprintf( " %8.3fs", $elapsed );
            }
            else {
                $elapsed = sprintf( " %8ss", $elapsed ? $elapsed : "<1" );
            }
        }
        else {
            $elapsed = "";
        }

        # state of the current test.
        my @failed = grep { !$results{details}[$_-1]{ok} }
                     1..@{$results{details}};
        my %test = (
            ok          => $results{ok},
            'next'      => $Strap->{'next'},
            max         => $results{max},
            failed      => \@failed,
            bonus       => $results{bonus},
            skipped     => $results{skip},
            skip_reason => $results{skip_reason},
            skip_all    => $Strap->{skip_all},
            ml          => $ml,
        );

        $tot{bonus}       += $results{bonus};
        $tot{max}         += $results{max};
        $tot{ok}          += $results{ok};
        $tot{todo}        += $results{todo};
        $tot{sub_skipped} += $results{skip};

        my($estatus, $wstatus) = @results{qw(exit wait)};

        if ($results{passing}) {
            # XXX Combine these first two
            if ($test{max} and $test{skipped} + $test{bonus}) {
                my @msg;
                push(@msg, "$test{skipped}/$test{max} skipped: $test{skip_reason}")
                    if $test{skipped};
                push(@msg, "$test{bonus}/$test{max} unexpectedly succeeded")
                    if $test{bonus};
                print "$test{ml}ok$elapsed\n        ".join(', ', @msg)."\n";
            }
            elsif ( $test{max} ) {
                print "$test{ml}ok$elapsed\n";
            }
            elsif ( defined $test{skip_all} and length $test{skip_all} ) {
                print "skipped\n        all skipped: $test{skip_all}\n";
                $tot{skipped}++;
            }
            else {
                print "skipped\n        all skipped: no reason given\n";
                $tot{skipped}++;
            }
            $tot{good}++;
        }
        else {
            # List unrun tests as failures.
            if ($test{'next'} <= $test{max}) {
                push @{$test{failed}}, $test{'next'}..$test{max};
            }
            # List overruns as failures.
            else {
                my $details = $results{details};
                foreach my $overrun ($test{max}+1..@$details) {
                    next unless ref $details->[$overrun-1];
                    push @{$test{failed}}, $overrun
                }
            }

            if ($wstatus) {
                $failedtests{$tfile} = _dubious_return(\%test, \%tot,
                                                       $estatus, $wstatus);
                $failedtests{$tfile}{name} = $tfile;
            }
            elsif($results{seen}) {
                if (@{$test{failed}} and $test{max}) {
                    my ($txt, $canon) = _canonfailed($test{max},$test{skipped},
                                                     @{$test{failed}});
                    print "$test{ml}$txt";
                    $failedtests{$tfile} = { canon   => $canon,
                                             max     => $test{max},
                                             failed  => scalar @{$test{failed}},
                                             name    => $tfile,
                                             percent => 100*(scalar @{$test{failed}})/$test{max},
                                             estat   => '',
                                             wstat   => '',
                                           };
                }
                else {
                    print "Don't know which tests failed: got $test{ok} ok, ".
                          "expected $test{max}\n";
                    $failedtests{$tfile} = { canon   => '??',
                                             max     => $test{max},
                                             failed  => '??',
                                             name    => $tfile,
                                             percent => undef,
                                             estat   => '',
                                             wstat   => '',
                                           };
                }
                $tot{bad}++;
            }
            else {
                print "FAILED before any test output arrived\n";
                $tot{bad}++;
                $failedtests{$tfile} = { canon   => '??',
                                         max     => '??',
                                         failed  => '??',
                                         name    => $tfile,
                                         percent => undef,
                                         estat   => '',
                                         wstat   => '',
                                       };
            }
        }

        if (defined $Files_In_Dir) {
            my @new_dir_files = _globdir $Files_In_Dir;
            if (@new_dir_files != @dir_files) {
                my %f;
                @f{@new_dir_files} = (1) x @new_dir_files;
                delete @f{@dir_files};
                my @f = sort keys %f;
                print "LEAKED FILES: @f\n";
                @dir_files = @new_dir_files;
            }
        }
    } # foreach test
    $tot{bench} = timediff(new Benchmark, $run_start_time);

    $Strap->_restore_PERL5LIB;

    return(\%tot, \%failedtests);
}
=item B<_mk_leader>

  my($leader, $ml) = _mk_leader($test_file, $width);

Generates the 't/foo........' leader for the given C<$test_file> as well
as a similar version which will overwrite the current line (by use of
\r and such).  C<$ml> may be empty if Test::Harness doesn't think you're
on a TTY.

The C<$width> is the width of the "yada/blah.." string.

=cut
sub _mk_leader {
    my($te, $width) = @_;
    chomp($te);
    $te =~ s/\.\w+$/./;

    if ($^O eq 'VMS') {
        $te =~ s/^.*\.t\./\[.t./s;
    }
    my $leader = "$te" . '.' x ($width - length($te));
    my $ml = "";

    if ( -t STDOUT and not $ENV{HARNESS_NOTTY} and not $Verbose ) {
        $ml = "\r" . (' ' x 77) . "\r$leader"
    }

    return($leader, $ml);
}
=item B<_leader_width>

  my($width) = _leader_width(@test_files);

Calculates how wide the leader should be based on the length of the
longest test name.  The result is the longest name length plus three
(for the three dots) minus the longest suffix length, since the suffix
is dropped when the leader is built.

=cut
sub _leader_width {
    my $maxlen = 0;
    my $maxsuflen = 0;
    foreach (@_) {
        my $suf    = /\.(\w+)$/ ? $1 : '';
        my $len    = length;
        my $suflen = length $suf;
        $maxlen    = $len    if $len    > $maxlen;
        $maxsuflen = $suflen if $suflen > $maxsuflen;
    }
    # + 3 : we want three dots between the test name and the "ok"
    return $maxlen + 3 - $maxsuflen;
}
sub _show_results {
    my($tot, $failedtests) = @_;

    my $pct;
    my $bonusmsg = _bonusmsg($tot);

    if (_all_ok($tot)) {
        print "All tests successful$bonusmsg.\n";
    }
    elsif (!$tot->{tests}){
        die "FAILED--no tests were run for some reason.\n";
    }
    elsif (!$tot->{max}) {
        my $blurb = $tot->{tests}==1 ? "script" : "scripts";
        die "FAILED--$tot->{tests} test $blurb could be run, ".
            "alas--no output ever seen\n";
    }
    else {
        $pct = sprintf("%.2f", $tot->{good} / $tot->{tests} * 100);
        my $percent_ok = 100*$tot->{ok}/$tot->{max};
        my $subpct = sprintf " %d/%d subtests failed, %.2f%% okay.",
                             $tot->{max} - $tot->{ok}, $tot->{max},
                             $percent_ok;

        my($fmt_top, $fmt) = _create_fmts($failedtests);

        # Now write to formats
        for my $script (sort keys %$failedtests) {
            $Curtest = $failedtests->{$script};
            write;
        }
        if ($tot->{bad}) {
            $bonusmsg =~ s/^,\s*//;
            print "$bonusmsg.\n" if $bonusmsg;
            die "Failed $tot->{bad}/$tot->{tests} test scripts, $pct% okay.".
                "$subpct\n";
        }
    }

    printf("Files=%d, Tests=%d, %s\n",
           $tot->{files}, $tot->{max}, timestr($tot->{bench}, 'nop'));
}
my %Handlers = (
    header  => \&header_handler,
    test    => \&test_handler,
    bailout => \&bailout_handler,
);

$Strap->{callback} = \&strap_callback;
sub strap_callback {
    my($self, $line, $type, $totals) = @_;
    print $line if $Verbose;

    my $meth = $Handlers{$type};
    $meth->($self, $line, $type, $totals) if $meth;
}
sub header_handler {
    my($self, $line, $type, $totals) = @_;

    warn "Test header seen more than once!\n" if $self->{_seen_header};

    $self->{_seen_header}++;

    warn "1..M can only appear at the beginning or end of tests\n"
      if $totals->{seen} &&
         $totals->{max}  < $totals->{seen};
}
sub test_handler {
    my($self, $line, $type, $totals) = @_;

    my $curr = $totals->{seen};
    my $next = $self->{'next'};
    my $max  = $totals->{max};
    my $detail = $totals->{details}[-1];

    if( $detail->{ok} ) {
        _print_ml_less("ok $curr/$max");

        if( $detail->{type} eq 'skip' ) {
            $totals->{skip_reason} = $detail->{reason}
              unless defined $totals->{skip_reason};
            $totals->{skip_reason} = 'various reasons'
              if $totals->{skip_reason} ne $detail->{reason};
        }
    }
    else {
        _print_ml("NOK $curr");
    }

    if( $curr > $next ) {
        print "Test output counter mismatch [test $curr]\n";
    }
    elsif( $curr < $next ) {
        print "Confused test output: test $curr answered after ".
              "test ", $next - 1, "\n";
    }
}
sub bailout_handler {
    my($self, $line, $type, $totals) = @_;

    die "FAILED--Further testing stopped" .
        ($self->{bailout_reason} ? ": $self->{bailout_reason}\n" : ".\n");
}
sub _print_ml {
    print join '', $ML, @_ if $ML;
}

# Print updates only once per second.
sub _print_ml_less {
    my $now = CORE::time;
    if ( $Last_ML_Print != $now ) {
        _print_ml(@_);
        $Last_ML_Print = $now;
    }
}
sub _bonusmsg {
    my($tot) = @_;

    my $bonusmsg = '';
    $bonusmsg = (" ($tot->{bonus} subtest".($tot->{bonus} > 1 ? 's' : '').
                 " UNEXPECTEDLY SUCCEEDED)")
        if $tot->{bonus};

    if ($tot->{skipped}) {
        $bonusmsg .= ", $tot->{skipped} test"
                     . ($tot->{skipped} != 1 ? 's' : '');
        if ($tot->{sub_skipped}) {
            $bonusmsg .= " and $tot->{sub_skipped} subtest"
                         . ($tot->{sub_skipped} != 1 ? 's' : '');
        }
        $bonusmsg .= ' skipped';
    }
    elsif ($tot->{sub_skipped}) {
        $bonusmsg .= ", $tot->{sub_skipped} subtest"
                     . ($tot->{sub_skipped} != 1 ? 's' : '')
                     . " skipped";
    }

    return $bonusmsg;
}
# Test program go boom.
sub _dubious_return {
    my($test, $tot, $estatus, $wstatus) = @_;

    my ($failed, $canon, $percent) = ('??', '??');

    printf "$test->{ml}dubious\n\tTest returned status $estatus ".
           "(wstat %d, 0x%x)\n",
           $wstatus,$wstatus;
    print "\t\t(VMS status is $estatus)\n" if $^O eq 'VMS';

    $tot->{bad}++;

    if ($test->{max}) {
        if ($test->{'next'} == $test->{max} + 1 and not @{$test->{failed}}) {
            print "\tafter all the subtests completed successfully\n";
            $percent = 0;
            $failed = 0;        # But we do not set $canon!
        }
        else {
            push @{$test->{failed}}, $test->{'next'}..$test->{max};
            $failed = @{$test->{failed}};
            (my $txt, $canon) = _canonfailed($test->{max},$test->{skipped},@{$test->{failed}});
            $percent = 100*(scalar @{$test->{failed}})/$test->{max};
            print "DIED. ",$txt;
        }
    }

    return { canon   => $canon,  max => $test->{max} || '??',
             failed  => $failed,
             percent => $percent,
             estat   => $estatus, wstat => $wstatus,
           };
}
sub _create_fmts {
    my($failedtests) = @_;

    my $failed_str = "Failed Test";
    my $middle_str = " Stat Wstat Total Fail Failed ";
    my $list_str = "List of Failed";

    # Figure out our longest name string for formatting purposes.
    my $max_namelen = length($failed_str);
    foreach my $script (keys %$failedtests) {
        my $namelen = length $failedtests->{$script}->{name};
        $max_namelen = $namelen if $namelen > $max_namelen;
    }

    my $list_len = $Columns - length($middle_str) - $max_namelen;
    if ($list_len < length($list_str)) {
        $list_len = length($list_str);
        $max_namelen = $Columns - length($middle_str) - $list_len;
        if ($max_namelen < length($failed_str)) {
            $max_namelen = length($failed_str);
            $Columns = $max_namelen + length($middle_str) + $list_len;
        }
    }

    my $fmt_top =   "format STDOUT_TOP =\n"
                  . sprintf("%-${max_namelen}s", $failed_str)
                  . $middle_str
                  . $list_str . "\n"
                  . "-" x $Columns
                  . "\n.\n";

    my $fmt =   "format STDOUT =\n"
              . "@" . "<" x ($max_namelen - 1)
              . " @>> @>>>> @>>>> @>>> ^##.##% "
              . "^" . "<" x ($list_len - 1) . "\n"
              . '{ $Curtest->{name}, $Curtest->{estat},'
              . ' $Curtest->{wstat}, $Curtest->{max},'
              . ' $Curtest->{failed}, $Curtest->{percent},'
              . ' $Curtest->{canon}'
              . "\n}\n"
              . "~~" . " " x ($Columns - $list_len - 2) . "^"
              . "<" x ($list_len - 1) . "\n"
              . '$Curtest->{canon}'
              . "\n.\n";

    eval $fmt_top;
    die $@ if $@;
    eval $fmt;
    die $@ if $@;

    return($fmt_top, $fmt);
}
sub _canonfailed ($$@) {
    my($max,$skipped,@failed) = @_;
    my %seen;
    @failed = sort {$a <=> $b} grep !$seen{$_}++, @failed;
    my $failed = @failed;
    my @result = ();
    my @canon = ();
    my $min;
    my $last = $min = shift @failed;
    my $canon;
    if (@failed) {
        for (@failed, $failed[-1]) { # don't forget the last one
            if ($_ > $last+1 || $_ == $last) {
                push @canon, ($min == $last) ? $last : "$min-$last";
                $min = $_;
            }
            $last = $_;
        }
        local $" = ", ";
        push @result, "FAILED tests @canon\n";
        $canon = join ' ', @canon;
    }
    else {
        push @result, "FAILED test $last\n";
        $canon = $last;
    }

    push @result, "\tFailed $failed/$max tests, ";
    if ($max) {
        push @result, sprintf("%.2f",100*(1-$failed/$max)), "% okay";
    }
    else {
        push @result, "?% okay";
    }
    my $ender = 's' x ($skipped > 1);
    if ($skipped) {
        my $good = $max - $failed - $skipped;
        my $skipmsg = " (less $skipped skipped test$ender: $good okay, ";
        if ($max) {
            my $goodper = sprintf("%.2f",100*($good/$max));
            $skipmsg .= "$goodper%)";
        }
        else {
            $skipmsg .= "?%)";
        }
        push @result, $skipmsg;
    }
    push @result, "\n";
    my $txt = join "", @result;
    ($txt, $canon);
}
=end _private

=back

=cut

1;

__END__
=head1 EXPORT

C<&runtests> is exported by Test::Harness by default.

C<$verbose>, C<$switches> and C<$debug> are exported upon request.
=head1 DIAGNOSTICS

=over 4

=item C<All tests successful.\nFiles=%d, Tests=%d, %s>

If all tests are successful some statistics about the performance are
printed.

=item C<FAILED tests %s\n\tFailed %d/%d tests, %.2f%% okay.>

For any single script that has failing subtests, statistics like the
above are printed.

=item C<Test returned status %d (wstat %d)>

For scripts that return a non-zero exit status, both C<$? E<gt>E<gt> 8>
and C<$?> are printed in a message similar to the above.

=item C<Failed 1 test, %.2f%% okay. %s>

=item C<Failed %d/%d tests, %.2f%% okay. %s>

If not all tests were successful, the script dies with one of the
above messages.

=item C<FAILED--Further testing stopped: %s>

If a single subtest decides that further testing will not make sense,
the script dies with this message.

=back
=head1 ENVIRONMENT VARIABLES THAT TEST::HARNESS SETS

Test::Harness sets these before executing the individual tests.

=over 4

=item C<HARNESS_ACTIVE>

This is set to a true value.  It allows the tests to determine if they
are being executed through the harness or by any other means.  See the
sketch after this list for an example.

=item C<HARNESS_VERSION>

This is the version of Test::Harness.

=back
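A test script can branch on C<HARNESS_ACTIVE>; a minimal sketch:

  # inside a .t file
  if ( $ENV{HARNESS_ACTIVE} ) {
      # Running under Test::Harness: keep STDOUT strictly TAP.
  }
  else {
      # Run directly: extra diagnostics will not confuse a harness.
      print "# running standalone\n";
  }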
=head1 ENVIRONMENT VARIABLES THAT AFFECT TEST::HARNESS

=over 4

=item C<HARNESS_COLUMNS>

This value will be used for the width of the terminal.  If it is not
set then it will default to C<COLUMNS>.  If this is not set, it will
default to 80.  Note that users of Bourne-sh based shells will need to
C<export COLUMNS> for this module to use that variable.

=item C<HARNESS_COMPILE_TEST>

When true it will make harness attempt to compile the test using
C<perlcc> before running it.

B<NOTE> This currently only works when sitting in the perl source
directory!

=item C<HARNESS_DEBUG>

If true, Test::Harness will print debugging information about itself as
it runs the tests.  This is different from C<HARNESS_VERBOSE>, which prints
the output from the test being run.  Setting C<$Test::Harness::Debug> will
override this, or you can use the C<-d> switch in the F<prove> utility.

=item C<HARNESS_FILELEAK_IN_DIR>

When set to the name of a directory, harness will check after each
test whether new files appeared in that directory, and report them as

  LEAKED FILES: scr.tmp 0 my.db

If relative, the directory name is interpreted with respect to the current
directory at the moment runtests() was called.  Putting an absolute path
into C<HARNESS_FILELEAK_IN_DIR> may give more predictable results.

=item C<HARNESS_IGNORE_EXITCODE>

Makes harness ignore the exit status of child processes when defined.

=item C<HARNESS_NOTTY>

When set to a true value, forces Test::Harness to behave as though STDOUT
were not a console.  You may need to set this if you don't want harness to
output more frequent progress messages using carriage returns.  Some
consoles may not handle carriage returns properly (which results in a
somewhat messy output).

=item C<HARNESS_PERL>

Usually your tests will be run by C<$^X>, the currently-executing Perl.
However, you may want to have it run by a different executable, such as
a threading perl, or a different version.

If you're using the F<prove> utility, you can use the C<--perl> switch.

=item C<HARNESS_PERL_SWITCHES>

Its value will be prepended to the switches used to invoke perl on
each test.  For example, setting C<HARNESS_PERL_SWITCHES> to C<-W> will
run all tests with all warnings enabled.  A sketch of setting several
of these variables from Perl follows this list.

=item C<HARNESS_VERBOSE>

If true, Test::Harness will output the verbose results of running
its tests.  Setting C<$Test::Harness::verbose> will override this,
or you can use the C<-v> switch in the F<prove> utility.

=back
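A minimal sketch of setting some of these from Perl before a run (the
values and the F<t/*.t> glob are illustrative only):

  use Test::Harness;

  $ENV{HARNESS_PERL_SWITCHES} = '-W';   # extra switches for every test
  $ENV{HARNESS_COLUMNS}       = 120;    # wider summary table
  $ENV{HARNESS_NOTTY}         = 1;      # suppress \r progress updates

  runtests(glob 't/*.t');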
=head1 EXAMPLE

Here's how Test::Harness tests itself:

  $ cd ~/src/devel/Test-Harness
  $ perl -Mblib -e 'use Test::Harness qw(&runtests $verbose);
    $verbose=0; runtests @ARGV;' t/*.t
  Using /home/schwern/src/devel/Test-Harness/blib
  t/base..............ok
  t/nonumbers.........ok
  t/ok................ok
  t/test-harness......ok
  All tests successful.
  Files=4, Tests=24, 2 wallclock secs ( 0.61 cusr + 0.41 csys = 1.02 CPU)
=head1 SEE ALSO

The included F<prove> utility for running test scripts from the command line,
L<Test> and L<Test::Simple> for writing test scripts, L<Benchmark> for
the underlying timing routines, and L<Devel::Cover> for test coverage
analysis.
=head1 TODO

Provide a way of running tests quietly (i.e., no printing) for automated
validation of tests.  This will probably take the form of a version
of runtests() which rather than printing its output returns raw data
on the state of the tests.  (Partially done in Test::Harness::Straps)

Document the format.

Fix HARNESS_COMPILE_TEST without breaking its core usage.

Figure a way to report test names in the failure summary.

Rework the test summary so long test names are not truncated as badly.
(Partially done with new skip test styles)

Add option for coverage analysis.

Trap STDERR.

Implement Straps total_results()

Remember exit code

Completely redo the print summary code.

Implement Straps callbacks.  (experimentally implemented)

Straps->analyze_file() not taint clean, don't know if it can be

Fix that damned VMS nit.

HARNESS_TODOFAIL to display TODO failures

Add a test for verbose.

Change internal list of test results to a hash.

Fix stats display when there's an overrun.

Fix so perls with spaces in the filename work.

Keep whittling away at _run_all_tests()

Clean up how the summary is printed.  Get rid of those damned formats.
=head1 BUGS

HARNESS_COMPILE_TEST currently assumes it's run from the Perl source
directory.

Please use the CPAN bug ticketing system at L<http://rt.cpan.org/>.
You can also mail bugs, fixes and enhancements to
C<< <bug-test-harness at rt.cpan.org> >>.
=head1 AUTHORS

Either Tim Bunce or Andreas Koenig, we don't know.  What we know for
sure is that it was inspired by Larry Wall's TEST script that came
with perl distributions for ages.  Numerous anonymous contributors
exist.  Andreas Koenig held the torch for many years, and then
Michael G Schwern.

Current maintainer is Andy Lester C<< <andy at petdance.com> >>.
=head1 COPYRIGHT

Copyright 2002-2005
by Michael G Schwern C<< <schwern at pobox.com> >>,
Andy Lester C<< <andy at petdance.com> >>.

This program is free software; you can redistribute it and/or
modify it under the same terms as Perl itself.

See L<http://www.perl.com/perl/misc/Artistic.html>.

=cut