[bioperl-network.git] / t/Test.pm
use strict;
package Test;
use Carp;
use vars (qw($VERSION @ISA @EXPORT @EXPORT_OK $ntest $TestLevel), #public-ish
          qw($TESTOUT $ONFAIL %todo %history $planned @FAILDETAIL)); #private-ish
$VERSION = '1.15';
require Exporter;
@ISA=('Exporter');
@EXPORT=qw(&plan &ok &skip);
@EXPORT_OK=qw($ntest $TESTOUT);

$TestLevel = 0;   # how many extra stack frames to skip
$|=1;
#$^W=1;  ?
$ntest=1;
$TESTOUT = *STDOUT{IO};

# Use of this variable is strongly discouraged.  It is set mainly to
# help test coverage analyzers know which test is running.
$ENV{REGRESSION_TEST} = $0;

sub plan {
    croak "Test::plan(%args): odd number of arguments" if @_ & 1;
    croak "Test::plan(): should not be called more than once" if $planned;
    my $max=0;
    for (my $x=0; $x < @_; $x+=2) {
        my ($k,$v) = @_[$x,$x+1];
        if ($k =~ /^test(s)?$/) { $max = $v; }
        elsif ($k eq 'todo' or
               $k eq 'failok') { for (@$v) { $todo{$_}=1; }; }
        elsif ($k eq 'onfail') {
            ref $v eq 'CODE' or croak "Test::plan(onfail => $v): must be CODE";
            $ONFAIL = $v;
        }
        else { carp "Test::plan(): skipping unrecognized directive '$k'" }
    }
    my @todo = sort { $a <=> $b } keys %todo;
    if (@todo) {
        print $TESTOUT "1..$max todo ".join(' ', @todo).";\n";
    } else {
        print $TESTOUT "1..$max\n";
    }
    ++$planned;
}

# Evaluate code references lazily; anything else is returned as-is.
sub to_value {
    my ($v) = @_;
    (ref $v or '') eq 'CODE' ? $v->() : $v;
}

sub ok ($;$$) {
    croak "ok: plan before you test!" if !$planned;
    my ($pkg,$file,$line) = caller($TestLevel);
    my $repetition = ++$history{"$file:$line"};
    my $context = ("$file at line $line".
                   ($repetition > 1 ? " fail \#$repetition" : ''));
    my $ok=0;
    my $result = to_value(shift);
    my ($expected,$diag);
    if (@_ == 0) {
        $ok = $result;
    } else {
        $expected = to_value(shift);
        my ($regex,$ignore);
        if (!defined $expected) {
            $ok = !defined $result;
        } elsif (!defined $result) {
            $ok = 0;
        } elsif ((ref($expected)||'') eq 'Regexp') {
            $ok = $result =~ /$expected/;
        } elsif (($regex) = ($expected =~ m,^ / (.+) / $,sx) or
                 ($ignore, $regex) = ($expected =~ m,^ m([^\w\s]) (.+) \1 $,sx)) {
            # a plain string of the form '/.../' or 'm!...!' is treated as a pattern
            $ok = $result =~ /$regex/;
        } else {
            $ok = $result eq $expected;
        }
    }
    my $todo = $todo{$ntest};
    if ($todo and $ok) {
        $context .= ' TODO?!' if $todo;
        print $TESTOUT "ok $ntest # ($context)\n";
    } else {
        print $TESTOUT "not " if !$ok;
        print $TESTOUT "ok $ntest\n";

        if (!$ok) {
            my $detail = { 'repetition' => $repetition, 'package' => $pkg,
                           'result' => $result, 'todo' => $todo };
            $$detail{expected} = $expected if defined $expected;
            $diag = $$detail{diagnostic} = to_value(shift) if @_;
            $context .= ' *TODO*' if $todo;
            if (!defined $expected) {
                if (!$diag) {
                    print $TESTOUT "# Failed test $ntest in $context\n";
                } else {
                    print $TESTOUT "# Failed test $ntest in $context: $diag\n";
                }
            } else {
                my $prefix = "Test $ntest";
                print $TESTOUT "# $prefix got: ".
                    (defined $result? "'$result'":'<UNDEF>')." ($context)\n";
                $prefix = ' ' x (length($prefix) - 5);
                if ((ref($expected)||'') eq 'Regexp') {
                    $expected = 'qr/'.$expected.'/'
                } else {
                    $expected = "'$expected'";
                }
                if (!$diag) {
                    print $TESTOUT "# $prefix Expected: $expected\n";
                } else {
                    print $TESTOUT "# $prefix Expected: $expected ($diag)\n";
                }
            }
            push @FAILDETAIL, $detail;
        }
    }
    ++ $ntest;
    $ok;
}

sub skip ($$;$$) {
    my $whyskip = to_value(shift);
    if ($whyskip) {
        $whyskip = 'skip' if $whyskip =~ m/^\d+$/;
        print $TESTOUT "ok $ntest # $whyskip\n";
        ++ $ntest;
        1;
    } else {
        local($TestLevel) = $TestLevel+1;  #ignore this stack frame
        &ok;
    }
}

END {
    # give the optional ONFAIL hook a chance to report accumulated failures
    $ONFAIL->(\@FAILDETAIL) if @FAILDETAIL && $ONFAIL;
}

1;

__END__

=head1 NAME

Test - provides a simple framework for writing test scripts

=head1 SYNOPSIS

  use strict;
  use Test;

  # use a BEGIN block so we print our plan before MyModule is loaded
  BEGIN { plan tests => 14, todo => [3,4] }

  # load your module...
  use MyModule;

  ok(0); # failure
  ok(1); # success

  ok(0); # ok, expected failure (see todo list, above)
  ok(1); # surprise success!

  ok(0,1);             # failure: '0' ne '1'
  ok('broke','fixed'); # failure: 'broke' ne 'fixed'
  ok('fixed','fixed'); # success: 'fixed' eq 'fixed'
  ok('fixed',qr/x/);   # success: 'fixed' =~ qr/x/

  ok(sub { 1+1 }, 2);  # success: '2' eq '2'
  ok(sub { 1+1 }, 3);  # failure: '2' ne '3'
  ok(0, int(rand(2))); # (just kidding :-)

  my @list = (0,0);
  ok @list, 3, "\@list=".join(',',@list); # extra diagnostics
  ok 'segmentation fault', '/(?i)success/'; # regex match

  skip($feature_is_missing, ...); # do platform specific test

=head1 DESCRIPTION

L<Test::Harness|Test::Harness> expects to see particular output when it
executes tests.  This module aims to make writing proper test scripts just
a little bit easier (and less error prone :-).

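As a rough illustration, here is a tiny script together with the output it
prints on STDOUT for L<Test::Harness|Test::Harness> to parse.  The
diagnostic lines shown are only indicative, and F<t/example.t> is a made-up
file name:

  use Test;
  BEGIN { plan tests => 2 }
  ok(1);              # passes
  ok('got', 'want');  # fails: 'got' ne 'want'

  # output, roughly:
  #   1..2
  #   ok 1
  #   not ok 2
  #   # Test 2 got: 'got' (t/example.t at line 4)
  #   #   Expected: 'want'
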
=head1 TEST TYPES

=over 4

=item * NORMAL TESTS

These tests are expected to succeed.  If they don't, something's
screwed up!

=item * SKIPPED TESTS

Skip is for tests that might or might not be possible to run depending
on the availability of platform specific features.  The first argument
should evaluate to true (think "yes, please skip") if the required
feature is not available.  After the first argument, skip works
exactly the same way as normal tests do (see the example after this
list).

=item * TODO TESTS

TODO tests are designed for maintaining an B<executable TODO list>.
These tests are expected NOT to succeed.  If a TODO test does succeed,
the feature in question should not be on the TODO list, now should it?

Packages should NOT be released with succeeding TODO tests.  As soon
as a TODO test starts working, it should be promoted to a normal test
and the newly working feature should be documented in the release
notes or change log.

=back

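For instance, here is a minimal sketch combining a skipped test and a TODO
test.  C<Some::Module> is only a placeholder for whatever optional
dependency your test actually needs:

  use Test;
  BEGIN { plan tests => 2, todo => [2] }

  # test 1: skipped (with a reason) when the optional module is missing
  my $missing = eval { require Some::Module; 1 }
                ? 0 : 'Some::Module not installed';
  skip($missing, sub { 1 + 1 }, 2);

  # test 2: an executable TODO entry, expected to fail until the feature works
  ok(0);
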
=head1 RETURN VALUE

In a scalar context, both C<ok> and C<skip> return true if their test
succeeds and false otherwise.

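For example, the return value can be used to hang extra reporting off a
particular check (C<$got> and C<$expected> are just placeholders here):

  warn "this one needs a closer look\n" unless ok($got, $expected);
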
=head1 ONFAIL

  BEGIN { plan test => 4, onfail => sub { warn "CALL 911!" } }

While the failure messages printed by C<ok> should usually be enough, extra
diagnostics can be triggered at the end of a test run.  C<onfail> is passed
an array ref of hash refs that describe each test failure.  Each hash will
contain at least the following fields: C<package>, C<repetition>, and
C<result>.  (The file, line, and test number are not included because
their correspondence to a particular test is tenuous.)  If the test
had an expected value or a diagnostic string, these will also be
included.

The B<optional> C<onfail> hook might be used simply to print out the
version of your package and/or how to report problems.  It might also
be used to generate extremely sophisticated diagnostics for a
particularly bizarre test failure.  However, it's not a panacea.  Core
dumps or other unrecoverable errors prevent the C<onfail> hook from
running.  (It is run inside an C<END> block.)  Besides, C<onfail> is
probably overkill in most cases.  (Your test code should be simpler
than the code it is testing, yes?)

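Here is a minimal sketch of such a hook, using only the fields documented
above (C<MyModule> is the same placeholder module as in the SYNOPSIS):

  BEGIN {
      plan tests => 14, onfail => sub {
          my ($failures) = @_;   # array ref of per-failure hash refs
          warn "MyModule $MyModule::VERSION: ", scalar(@$failures),
               " test(s) failed; please mail the maintainer\n";
          for my $f (@$failures) {
              warn "  in package $f->{package}: got '",
                   (defined $f->{result} ? $f->{result} : '<UNDEF>'), "'",
                   (exists $f->{expected} ? ", expected '$f->{expected}'" : ''),
                   "\n";
          }
      };
  }
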
=head1 SEE ALSO

L<Test::Harness> and, perhaps, test coverage analysis tools.

=head1 AUTHOR

Copyright (c) 1998-1999 Joshua Nathaniel Pritikin.  All rights reserved.

This package is free software and is provided "as is" without express
or implied warranty.  It may be used, redistributed and/or modified
under the terms of the Perl Artistic License (see
http://www.perl.com/perl/misc/Artistic.html).

=cut