# Copyright (C) Lumiera.org
#  2007, 2008, 2009, 2010, Christian Thaeter <ct@pipapo.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
# TESTMODE=FULL    yet unimplemented
#   run all tests, PLANNED tests which fail count as errors
#
# TESTMODE=...
#   run only tests which recently failed
#
# TESTMODE=...
#   stop testing on the first failure
#intro Christian Thäter
#intro A shell script driving software tests.
#config HEAD- Configuration; configuration; configure tests
#config PARA LOGSUPPRESS; LOGSUPPRESS; suppress certain lines from stderr
#config LOGSUPPRESS='^\(\*\*[0-9]*\*\* \)\?[0-9]\{10,\}[:!] \(TRACE\|INFO\|NOTICE\|WARNING\|ERR\):'
#config Programs sometimes emit additional diagnostics on stderr which are volatile and not necessary for
#config validating the output. The `LOGSUPPRESS` variable can be set to a regex to filter such lines out.
#config The default shown above filters out some NoBug annotations and non-fatal logging.
LOGSUPPRESS='^\(\*\*[0-9]*\*\* \)\?[0-9]\{10,\}[:!] \(TRACE\|INFO\|NOTICE\|WARNING\|ERR\):'
#config PARA Resource Limits; ulimit; constrain resource limits
#config It is possible to set some limits for tests to protect the system against really broken cases.
#config Since running under valgrind takes considerably more resources, there are separate variants of
#config these limits for running under valgrind.
#config Maximal CPU time (`LIMIT_CPU`) the test may take; after that it is killed with SIGXCPU. This protects against livelocks.
#config Maximal wall-clock time (`LIMIT_TIME`) a test may take; after that it is killed with SIGKILL. This protects against deadlocks.
#config LIMIT_VSZ=524288
#config Maximal virtual memory size the process may map; allocations/mappings will fail when this limit is reached.
#config This protects against memory leaks.
#config LIMIT_VG_CPU=20
#config LIMIT_VG_TIME=30
#config LIMIT_VG_VSZ=524288
#config The same limits again, used when the tests run under valgrind.
#configf HEAD~ Configuration Files; configuration files; define variables to configure the test
#configf `test.sh` reads config files from the following locations if they exist:
#configf * 'test.conf' from the current directory
#configf * '$srcdir/test.conf' where `$srcdir` is set by autotools
#configf * '$srcdir/tests/test.conf' where `tests/` is assumed to be the default directory for tests
#configf * '$TEST_CONF' a user-definable variable pointing to a config file
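#configf
#configf A minimal 'test.conf' might look like the following sketch; the values shown are only
#configf illustrative, any of the variables documented above can be set here:
#configf
#configf  LIMIT_CPU=10
#configf  LIMIT_VG_TIME=60
#configf  VALGRINDFLAGS=DISABLE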
test -f 'test.conf' && source test.conf
test -n "$srcdir" -a -e "$srcdir/test.conf" && source "$srcdir/test.conf"
test -n "$srcdir" -a -e "$srcdir/tests/test.conf" && source "$srcdir/tests/test.conf"
test -n "$TEST_CONF" -a -e "$TEST_CONF" && source "$TEST_CONF"
TESTDIR="$(dirname "$arg0")"
#libtool HEAD Libtool; libtool; support for libtool
#libtool When test.sh detects the presence of './libtool' it runs all tests with
#libtool `./libtool --mode=execute`.
if test -x ./libtool; then
    LIBTOOL_EX="./libtool --mode=execute"
fi
#valgrind HEAD- Valgrind; valgrind; valgrind support
#valgrind Tests are run under valgrind supervision by default, unless this is disabled.
#valgrind PARA VALGRINDFLAGS; VALGRINDFLAGS; control valgrind options
#valgrind VALGRINDFLAGS="--leak-check=yes --show-reachable=yes"
#valgrind
#valgrind `VALGRINDFLAGS` defines the options which are passed to valgrind. This can be used to override
#valgrind the defaults or to switch the valgrind tool. The special case `VALGRINDFLAGS=DISABLE` disables
#valgrind valgrind for the tests.
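#valgrind
#valgrind For example, to run the whole suite once without valgrind (assuming the suite is started by
#valgrind invoking this script directly), one could do:
#valgrind
#valgrind  VALGRINDFLAGS=DISABLE ./test.sh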
#valgrind HEAD~ Generating Valgrind Suppression Files; vgsuppression; ignore false positives
#valgrind When there is a 'vgsuppression' executable in the current directory (built by something external),
#valgrind test.sh uses it to generate a local 'vgsuppression.supp' file and then uses that file to suppress
#valgrind all errors 'vgsuppression' generates. The idea here is that one adds code which triggers known
#valgrind false positives to 'vgsuppression'. Care must be taken that this program stays simple and does
#valgrind not generate true positives.
ulimit -S -t ${LIMIT_CPU:-5} -v ${LIMIT_VSZ:-524288}

LIMIT_TIME_REAL="$LIMIT_TIME"
if [ "$VALGRINDFLAGS" = 'DISABLE' ]; then
    echo "valgrind explicitly disabled"
else
    if [ "$(which valgrind)" ]; then
        ulimit -S -t ${LIMIT_VG_CPU:-20} -v ${LIMIT_VG_VSZ:-524288}
        LIMIT_TIME_REAL="$LIMIT_VG_TIME"
        if [[ -x 'vgsuppression' ]]; then
            if [[ 'vgsuppression' -nt 'vgsuppression.supp' ]]; then
                echo 'generating valgrind suppression file'
                $LIBTOOL_EX $(which valgrind) ${VALGRINDFLAGS:---leak-check=yes --show-reachable=yes} -q --gen-suppressions=all vgsuppression 2>&1 \
                    | awk '/^{/ {i = 1;} /^}/ {i = 0; print $0;} {if (i == 1) print $0;}' >vgsuppression.supp
            fi
            valgrind="$(which valgrind) ${VALGRINDFLAGS:---leak-check=yes --show-reachable=no} --suppressions=vgsuppression.supp -q"
        else
            valgrind="$(which valgrind) ${VALGRINDFLAGS:---leak-check=yes --show-reachable=no} -q"
        fi
    else
        echo "no valgrind found, proceeding without it"
    fi
fi
echo "================ ${0##*/} ================"

# the old testlog, if it exists, is used to check for previous test states
if test -f ,testlog; then
    mv ,testlog ,testlog.pre
fi
function compare_template() # template plainfile
{
    IFS='' read -u 3 -r template || return 0
    IFS='' read -u 4 -r line || { echo "no output"; return 1; }

    local cmd="${template%%:*}:"
    local arg="${template#*: }"

    if [[ $line =~ $arg ]]; then
        IFS='' read -u 4 -r line ||
            if IFS='' read -u 3 -r template; then
                echo "premature end in output, expecting $template:$templateno"

    if [[ $((++miss)) -gt 1 ]]; then
        echo -e "'$line':$lineno\ndoes not match\n$template:$templateno"

    IFS='' read -u 3 -r template || { echo "more output than expected: '$line':$lineno"; return 1; }

    if [[ "$line" = "$arg" ]]; then
        IFS='' read -u 3 -r template && IFS='' read -u 4 -r line || {

    echo -e "'$line':$lineno\ndoes not match\n$template:$templateno"

    echo "UNKNOWN MATCH COMMAND '$cmd'" 1>&2
}
#tests HEAD- Writing Tests; tests; how to write testsuites
#tests Tests are nothing more than bash scripts with some functions from the test.sh
#tests framework defined. Test.sh looks in the current directory for all files ending in '.test'
#tests and runs them in alphabetical order. The selection of these tests can be constrained with the
#tests `TESTSUITES` environment variable.
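#tests
#tests For example, assuming the suite is started by invoking this script directly, a run could be
#tests restricted to one or a few test files like this (the file names are only illustrative):
#tests
#tests  TESTSUITES=10foo ./test.sh
#tests  TESTSUITES=10foo,20bar ./test.sh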
#tests HEAD~ Testsuites; test files; writing tests
#tests It is common to start the names of the '.test' files with a two-digit number to give them a proper
#tests order: '10foo.test', '20bar.test' and so on. Each such file should only test a certain aspect of
#tests the system. You have to select the test binary with the `TESTING` function and then write a number
#tests of TESTs defining how the tested program should react. Since tests are shell scripts it is possible
#tests to add supplemental commands to set up and clean up the test environment.
#tests HEAD^ TESTING; TESTING; set the test binary
#tests TESTING "message" test_program
#tests
#tests Selects the test binary for the following tests and prints an informational message.
#tests
#tests `message`::
#tests   the message to be printed
#tests `test_program`::
#tests   an existing program which drives the tests, or a shell function
#tests
#tests TESTING "Testing a.out" ./a.out
echo -e "\n#### $1, $TESTFILE, $2" >>,testlog
#tests HEAD^ TEST; TEST; single test
#tests TEST "title" arguments.. <<END
#tests
#tests Runs a single test.
#tests
#tests `title`::
#tests   describes this test and is also used as the identifier for this test,
#tests   so it must be unique among all your tests
#tests `arguments..`::
#tests   the following arguments are passed to the test program
#tests `<<END .. END`::
#tests   a list of control commands describing expected inputs and outputs, given as a 'heredoc'
#tests
#tests Each line of the test specification in the heredoc either starts with an arbitrary number of
#tests spaces followed by a command, a colon and a space, and then additional arguments, or it is an
#tests empty or comment line.
#tests HEAD+ Test Commands; commands; define expected inputs and outputs
#tests
#tests PARA in; in; stdin data for a test
#tests  in: text
#tests
#tests Send `text` to stdin of the test binary. If no `in:` commands are given, nothing is sent to the
#tests test program.
#tests PARA out; out; expected stdout (regex) from a test
#tests  out: regex
#tests
#tests Expect `regex` on stdout. These regexes have 'triggering' semantics: each regex is matched against
#tests as many lines as possible (`.*` will match any remaining output); when the match fails, the next
#tests expected output line is tried. When that fails too, the test is aborted and counted as a failure.
#tests
#tests When no `out:` or `out-lit:` commands are given, stdout is not checked and any output there is ignored.
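#tests
#tests As an illustration of the 'triggering' semantics, the following (purely hypothetical) expectations
#tests accept any number of leading log lines and then a single result line:
#tests
#tests  out: ^LOG: .*
#tests  out: ^result: [0-9]+$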
#tests PARA err; err; expected stderr (regex) from a test
#tests  err: regex
#tests
#tests Same as `out:` but expects the data on stderr. When no `err:` or `err-lit:` commands are given,
#tests stderr is not checked and any output there is ignored.
#tests PARA out-lit; out-lit; expected stdout (literal) from a test
#tests  out-lit: text
#tests
#tests Expect `text` literally on stdout; it must match exactly or the test fails.
#tests PARA err-lit; err-lit; expected stderr (literal) from a test
#tests  err-lit: text
#tests
#tests Same as `out-lit:` but expects the data on stderr.
#tests PARA return; return; expected exit value of a test
#tests  return: value
#tests
#tests Expects `value` as the exit code of the tested program. The check can be negated by prepending the
#tests value with an exclamation mark; `return: !0` expects any exit code except zero.
#tests
#tests If no `return:` command is given, then a zero (success) return from the test program is expected.
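#tests
#tests Putting these commands together, a complete (purely illustrative) test could look like this;
#tests the test binary, its arguments and the expected output are assumptions made up for this example:
#tests
#tests  TESTING "Testing the echo tool" ./echotool
#tests
#tests  TEST "echoes a greeting" <<END
#tests  in: hello
#tests  out: hello.*
#tests  return: 0
#tests  END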
#tests HEAD+ Conditional Tests; conditional tests; switch tests on conditions
#tests Sometimes tests need to be adapted to the environment/platform they are running on. This can be
#tests achieved with common if-else-elseif-endif statements. These statements can be nested.
#tests PARA if; if; conditional test
#tests  if: check
#tests
#tests Executes `check` as a shell command; if it returns zero (success), the following test parts are used.
#tests PARA else; else; conditional alternative
#tests  else:
#tests
#tests If the previous `if` failed, then the following test parts are included in the test, otherwise they
#tests are ignored.
#tests PARA elseif; elseif; conditional alternative with test
#tests  elseif: check
#tests
#tests A composition of `else` and `if`: the following test parts are only included if the preceding `if`
#tests and `elseif` checks failed and `check` succeeds.
#tests PARA endif; endif; end of conditional test part
#tests  endif:
#tests
#tests Ends an `if` statement.
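#tests
#tests A small (purely illustrative) sketch of a conditional test; the check and the expected output are
#tests assumptions made up for this example:
#tests
#tests  TEST "reports the platform" <<END
#tests  if: uname | grep -q Linux
#tests  out: running on Linux.*
#tests  else:
#tests  out: running on another platform.*
#tests  endif:
#tests  return: 0
#tests  END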
#tests HEAD+ Other Elements;;
#tests
#tests PARA msg; msg; print a diagnostic message
#tests  msg: message..
#tests
#tests Prints `message` while processing the test suite.
#tests
#tests PARA comments; comments; adding comments to tests
#tests
#tests Lines starting with a hash mark ('#') and empty lines count as comments and are ignored.
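#tests
#tests For example, inside a test specification (the message text is only illustrative):
#tests
#tests  # exercise the error path next
#tests  msg: now checking error handling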
local valgrind="$valgrind"
if [ "$VALGRINDFLAGS" = 'DISABLE' ]; then

while read -r line; do
    local cmd="${line%%:*}:"
    local arg="${line#*: }"

    if [[ ! "$arg" ]]; then

    condstack="1$condstack"
    condstack="0$condstack"

    if [[ "${condstack:0:1}" = "0" ]]; then
    condstack="1${condstack:1}"
    condstack="0${condstack:1}"
    condstack="2${condstack:1}"

    if [[ "${condstack:0:1}" != "0" ]]; then
    condstack="0${condstack:1}"
    condstack="1${condstack:1}"

    condstack="${condstack:1}"

    if [[ "${condstack:0:1}" = "1" ]]; then
    echo "$arg" >>,send_stdin
    echo "regex_cont: $arg" >>,expect_stdout
    echo "regex_cont: $arg" >>,expect_stderr
    echo "literal: $arg" >>,expect_stdout
    echo "literal: $arg" >>,expect_stderr

    echo "UNKNOWN TEST COMMAND '$cmd'" 1>&2
echo -n "TEST $name: "
echo -en "\nTEST $name: $* " >>,testlog
if grep "^TEST $name: .* FAILED" ,testlog.pre >&/dev/null; then
    MSGFAIL=" (still broken)"
elif grep "^TEST $name: .* \\(SKIPPED (ok)\\|OK\\)" ,testlog.pre >&/dev/null; then
    echo ".. SKIPPED (ok)"
    echo ".. SKIPPED (ok)" >>,testlog
    SKIPCNT=$(($SKIPCNT + 1))
    TESTCNT=$(($TESTCNT + 1))
TESTCNT=$(($TESTCNT + 1))
if declare -F | grep $TESTBIN >&/dev/null; then

elif test -x $TESTBIN; then
    CALL="env $LIBTOOL_EX $valgrind"

    echo "test binary '$TESTBIN' not found" >,stderr
if test "$CALL" != '-'; then
    if test -f ,send_stdin; then
        $CALL $TESTBIN "$@" <,send_stdin 2>,stderr >,stdout

        $CALL $TESTBIN "$@" 2>,stderr >,stdout
( sleep $LIMIT_TIME_REAL && kill -KILL $pid ) &>/dev/null &

if [[ "$return" -le 128 ]]; then
    kill -INT $wpid >&/dev/null
if test -f ,expect_stdout; then
    grep -v "$LOGSUPPRESS" <,stdout >,tmp
    if ! compare_template ,expect_stdout ,tmp >>,cmptmp; then
        echo "unexpected data on stdout" >>,testtmp
        cat ,cmptmp >>,testtmp
if test -f ,expect_stderr; then
    grep -v "$LOGSUPPRESS" <,stderr >,tmp
    if ! compare_template ,expect_stderr ,tmp >>,cmptmp; then
        echo "unexpected data on stderr" >>,testtmp
        cat ,cmptmp >>,testtmp
if [[ "${expect_return:0:1}" = '!' ]]; then
    if [[ "${expect_return#\!}" = "$return" ]]; then
        echo "unexpected return value $return, expected $expect_return" >>,testtmp

    if [[ "${expect_return}" != "$return" ]]; then
        echo "unexpected return value $return, expected $expect_return" >>,testtmp
if test $fails -eq 0; then
    echo ".. OK$MSGOK" >>,testlog

    echo ".. FAILED$MSGFAIL"
    echo ".. FAILED$MSGFAIL" >>,testlog
    cat ,testtmp >>,testlog

    echo "stderr was:" >>,testlog
    cat ,stderr >>,testlog

    FAILCNT=$(($FAILCNT + 1))
#tests HEAD^ PLANNED; PLANNED; deactivated test
#tests PLANNED "title" arguments.. <<END
#tests
#tests Skips a single test.
#tests
#tests `title`::
#tests   describes this test and is also used as the identifier for this test,
#tests   so it must be unique among all your tests
#tests `arguments..`::
#tests   the following arguments are passed to the test program
#tests `<<END .. END`::
#tests   a list of control commands describing expected inputs and outputs, given as a 'heredoc'
#tests
#tests `PLANNED` acts as a drop-in replacement for `TEST`. Each such test is skipped (and counted as skipped).
#tests This can be used to specify tests in advance and activate them as soon as development catches up, or
#tests to deactivate intentionally broken tests which are to be fixed later.
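#tests
#tests Example (purely illustrative, mirroring the TEST example above):
#tests
#tests  PLANNED "planned feature, not yet implemented" <<END
#tests  in: frobnicate
#tests  out: frobnicated.*
#tests  return: 0
#tests  END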
echo -n "PLANNED $1: "
echo -en "\nPLANNED $* " >>,testlog
echo ".. SKIPPED (planned)"
echo ".. SKIPPED (planned)" >>,testlog
SKIPCNT=$(($SKIPCNT + 1))
TESTCNT=$(($TESTCNT + 1))
if test \( ! "${TESTSUITES/*,*/}" \) -a "$TESTSUITES"; then
    TESTSUITES="{$TESTSUITES}"
fi

for t in $(eval echo "$TESTDIR/*$TESTSUITES*.tests"); do
done | sort | uniq | {
    while read TESTFILE; do
        echo "### $TESTFILE" >&2
        if test -f $TESTFILE; then

    if [ $FAILCNT = 0 ]; then
        echo " ... PASSED $(($TESTCNT - $SKIPCNT)) TESTS, $SKIPCNT SKIPPED"

        echo " ... SUCCEEDED $(($TESTCNT - $FAILCNT - $SKIPCNT)) TESTS"
        echo " ... FAILED $FAILCNT TESTS"
        echo " ... SKIPPED $SKIPCNT TESTS"
        echo " see ',testlog' for details"
TESTSUITES="${TESTSUITES}${1:+${TESTSUITES:+,}$1}"