python: getopt: rename SambaOption to Option
[Samba.git] / ctdb / tests / run_tests.sh
blobdfe2a9ab36437f43247e07b2e5c545644437e92c
1 #!/usr/bin/env bash
# Print usage message and exit with failure.
# Note: also reached via -h / unknown option in the getopts loop.
usage() {
	cat <<EOF
Usage: $0 [OPTIONS] [TESTS]

Options:
  -A		Use "cat -A" to print test output (only some tests)
  -c		Run integration tests on a cluster
  -C		Clean up when done by removing test state directory (see -V)
  -D		Show diff between failed/expected test output (some tests only)
  -e		Exit on the first test failure
  -H		No headers - for running single test with other wrapper
  -I <count>	Iterate tests <count> times, exiting on failure (implies -e, -N)
  -l <count>	Use <count> daemons for local daemon integration tests
  -L		Print daemon logs on test failure (only some tests)
  -N		Don't print summary of tests results after running all tests
  -q		Quiet - don't show tests being run (still displays summary)
  -S <lib>	Use socket wrapper library <lib> for local integration tests
  -T <timeout>	Timeout tests after <timeout> seconds (default: 3600)
  -v		Verbose - print test output for non-failures (only some tests)
  -V <dir>	Use <dir> as test state directory
  -x		Trace this script with the -x option
  -X		Trace certain scripts run by tests using -x (only some tests)
EOF
	exit 1
}
# Print a message to stderr and exit.
# $1 - message
# $2 - exit status (optional, defaults to 1)
die ()
{
	echo "$1" >&2
	exit "${2:-1}"
}
34 ######################################################################
36 with_summary=true
37 quiet=false
38 exit_on_fail=false
39 max_iterations=1
40 no_header=false
41 test_state_dir=""
42 cleanup=false
43 test_time_limit=3600
45 export CTDB_TEST_VERBOSE=false
46 export CTDB_TEST_COMMAND_TRACE=false
47 export CTDB_TEST_CAT_RESULTS_OPTS=""
48 export CTDB_TEST_DIFF_RESULTS=false
49 export CTDB_TEST_PRINT_LOGS_ON_ERROR=false
50 export CTDB_TEST_LOCAL_DAEMONS=3
51 export CTDB_TEST_SWRAP_SO_PATH=""
# Parse command-line options; see usage() for the meaning of each flag.
while getopts "AcCDehHI:l:LNqS:T:vV:xX?" opt ; do
	case "$opt" in
	A) CTDB_TEST_CAT_RESULTS_OPTS="-A" ;;
	c) CTDB_TEST_LOCAL_DAEMONS="" ;;	# empty => run against a real cluster
	C) cleanup=true ;;
	D) CTDB_TEST_DIFF_RESULTS=true ;;
	e) exit_on_fail=true ;;
	H) no_header=true ;;
	# -I implies -e and -N (see usage text)
	I) max_iterations="$OPTARG" ; exit_on_fail=true ; with_summary=false ;;
	l) CTDB_TEST_LOCAL_DAEMONS="$OPTARG" ;;
	L) CTDB_TEST_PRINT_LOGS_ON_ERROR=true ;;
	N) with_summary=false ;;
	q) quiet=true ;;
	S) CTDB_TEST_SWRAP_SO_PATH="$OPTARG" ;;
	T) test_time_limit="$OPTARG" ;;
	v) CTDB_TEST_VERBOSE=true ;;
	V) test_state_dir="$OPTARG" ;;
	x) set -x ;;
	X) CTDB_TEST_COMMAND_TRACE=true ;;
	\?|h) usage ;;
	esac
done
shift $((OPTIND - 1))
# When invoked under a *run_cluster_tests* name (symlink/wrapper),
# behave as if -c had been given.
case $(basename "$0") in
*run_cluster_tests*)
	# Running on a cluster... same as -c
	CTDB_TEST_LOCAL_DAEMONS=""
	;;
esac
# Define show_progress according to -q: in quiet mode per-test output
# is swallowed, otherwise it is passed through unchanged.
if $quiet ; then
	show_progress() { cat >/dev/null ; }
else
	show_progress() { cat ; }
fi
######################################################################

# Print a 3-line banner announcing the test about to run.
# $1 - test name/path
test_header ()
{
	local name="$1"
	local bar="--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--==--"

	echo "$bar"
	echo "Running test $name ($(date '+%T'))"
	echo "$bar"
}
# Print a 3-line banner summarising a finished test.
# $1 - test name/path, $2 - exit status, $3 - interpretation
# (PASSED/FAILED/...), $4 - duration in seconds
test_footer ()
{
	local f="$1"
	local status="$2"
	local interp="$3"
	local duration="$4"

	# Only mention the numeric status when it is non-zero
	local statstr=""
	if [ "$status" -ne 0 ] ; then
		statstr=" (status $status)"
	fi
	echo "=========================================================================="
	echo "TEST ${interp}: ${f}${statstr} (duration: ${duration}s)"
	echo "=========================================================================="
}
# Run a single test executable and account for its result.
# $1 - path to test executable
# Updates tests_total/tests_passed/tests_skipped/tests_failed and
# appends a line to $summary_file when summaries are enabled.
# Returns the test's status, except that a skip (77) is mapped to 0.
# NOTE(review): relies on "set -o pipefail" (set later in this file) so
# the pipeline status is the test's, not show_progress's.
ctdb_test_run ()
{
	local f="$1"

	$no_header || test_header "$f"

	local status=0
	local start_time

	start_time=$(date '+%s')

	if [ -x "$f" ] ; then
		timeout "$test_time_limit" "$f" </dev/null | show_progress
		status=$?
	else
		echo "TEST IS NOT EXECUTABLE"
		status=99
	fi

	local duration=$(($(date +%s) - start_time))

	tests_total=$((tests_total + 1))

	# Interpret the exit status: 77/99 follow the automake test
	# convention (skip/hard error); 124 is timeout(1)'s timed-out status.
	local interp
	case "$status" in
	0)
		interp="PASSED"
		tests_passed=$((tests_passed + 1))
		;;
	77)
		interp="SKIPPED"
		tests_skipped=$((tests_skipped + 1))
		;;
	99)
		interp="ERROR"
		tests_failed=$((tests_failed + 1))
		;;
	124)
		interp="TIMEDOUT"
		tests_failed=$((tests_failed + 1))
		;;
	*)
		interp="FAILED"
		tests_failed=$((tests_failed + 1))
		;;
	esac

	$no_header || test_footer "$f" "$status" "$interp" "$duration"

	if $with_summary ; then
		local t
		if [ $status -eq 0 ] ; then
			t=" ${interp}"
		else
			t="*${interp}*"
		fi
		printf '%-10s %s\n' "$t" "$f" >>"$summary_file"
	fi

	# Skipped tests should not cause failure
	case "$status" in
	77)
		status=0
		;;
	esac

	return $status
}
189 ######################################################################
191 tests_total=0
192 tests_passed=0
193 tests_skipped=0
194 tests_failed=0
# Some platforms lack mktemp(1); provide a minimal substitute covering
# the only usages in this script: "mktemp" and "mktemp -d".
if ! type mktemp >/dev/null 2>&1 ; then
	# Not perfect, but it will do...
	mktemp ()
	{
		local dir=false
		if [ "$1" = "-d" ] ; then
			dir=true
		fi
		local t="${TMPDIR:-/tmp}/tmp.$$.$RANDOM"
		(
			# Subshell so the restrictive umask doesn't leak
			umask 077
			if $dir ; then
				mkdir "$t"
			else
				: >"$t"
			fi
		)
		echo "$t"
	}
fi

# Make a pipeline's status reflect a failing test command rather than
# the show_progress consumer (see ctdb_test_run)
set -o pipefail
# Prepare per-suite environment and run one test file via ctdb_test_run.
# $1 - path to test file
# Exports CTDB_TEST_SUITE_DIR and CTDB_TEST_TMP_DIR; leaves the test's
# exit status in the global $status.
run_one_test ()
{
	local f="$1"

	CTDB_TEST_SUITE_DIR=$(dirname "$f")
	export CTDB_TEST_SUITE_DIR
	# This expands the most probable problem cases like "." and "..".
	if [ "$(dirname "$CTDB_TEST_SUITE_DIR")" = "." ] ; then
		CTDB_TEST_SUITE_DIR=$(cd "$CTDB_TEST_SUITE_DIR" && pwd)
	fi

	# Set CTDB_TEST_TMP_DIR
	#
	# Determine the relative test suite subdirectory.  The top-level
	# test directory needs to be a prefix of the test suite directory,
	# so make absolute versions of both.
	local test_dir test_suite_dir reldir
	test_dir=$(cd "$CTDB_TEST_DIR" && pwd)
	test_suite_dir=$(cd "$CTDB_TEST_SUITE_DIR" && pwd)
	reldir="${test_suite_dir#"${test_dir}"/}"

	# Fresh scratch directory for this suite
	export CTDB_TEST_TMP_DIR="${test_state_dir}/${reldir}"
	rm -rf "$CTDB_TEST_TMP_DIR"
	mkdir -p "$CTDB_TEST_TMP_DIR"

	ctdb_test_run "$f"
	status=$?
}
# Run each named test: a file is run directly, a directory is expanded
# (top-level collection -> suites, suite -> *.sh) and recursed into.
# README files are ignored.  Honours $exit_on_fail via global $status.
run_tests ()
{
	local f

	for f ; do
		case "$f" in
		*/README|*/README.md)
			continue
			;;
		esac

		if [ ! -e "$f" ] ; then
			# Can't find it?  Check relative to CTDB_TEST_DIR.
			# Strip off current directory from beginning,
			# if there, just to make paths more friendly.
			f="${CTDB_TEST_DIR#"${PWD}"/}/${f}"
		fi

		if [ -d "$f" ] ; then
			local test_dir dir reldir subtests

			test_dir=$(cd "$CTDB_TEST_DIR" && pwd)
			dir=$(cd "$f" && pwd)
			reldir="${dir#"${test_dir}"/}"

			case "$reldir" in
			*/*/*)
				die "test \"$f\" is not recognised"
				;;
			*/*)
				# This is a test suite
				subtests=$(echo "${f%/}/"*".sh")
				if [ "$subtests" = "${f%/}/*.sh" ] ; then
					# Probably empty directory
					die "test \"$f\" is not recognised"
				fi
				;;
			CLUSTER|INTEGRATION|UNIT)
				# A collection of test suites
				subtests=$(echo "${f%/}/"*)
				;;
			*)
				die "test \"$f\" is not recognised"
			esac

			# Recurse - word-splitting wanted
			# shellcheck disable=SC2086
			run_tests $subtests
		elif [ -f "$f" ] ; then
			run_one_test "$f"
		else
			# Time to give up
			die "test \"$f\" is not recognised"
		fi

		if $exit_on_fail && [ "$status" -ne 0 ] ; then
			return "$status"
		fi
	done
}
export CTDB_TEST_MODE="yes"

# Following 2 lines may be modified by installation script
CTDB_TESTS_ARE_INSTALLED=false
CTDB_TEST_DIR=$(dirname "$0")
export CTDB_TESTS_ARE_INSTALLED CTDB_TEST_DIR

# Choose a default test state directory unless -V gave one
if [ -z "$test_state_dir" ] ; then
	if $CTDB_TESTS_ARE_INSTALLED ; then
		test_state_dir=$(mktemp -d)
	else
		test_state_dir="${CTDB_TEST_DIR}/var"
	fi
fi
mkdir -p "$test_state_dir"

summary_file="${test_state_dir}/.summary"
: >"$summary_file"

export TEST_SCRIPTS_DIR="${CTDB_TEST_DIR}/scripts"

# If no tests specified then run some defaults
if [ -z "$1" ] ; then
	if [ -n "$CTDB_TEST_LOCAL_DAEMONS" ] ; then
		set -- UNIT INTEGRATION
	else
		set -- INTEGRATION CLUSTER
	fi
fi
# Remove the test state directory if -C was given, otherwise report
# that it is being kept.
do_cleanup ()
{
	if $cleanup ; then
		echo "Removing test state directory: ${test_state_dir}"
		rm -rf "$test_state_dir"
	else
		echo "Not cleaning up test state directory: ${test_state_dir}"
	fi
}

# Clean up on interruption/termination, exiting with the conventional
# 128+signal status
trap "do_cleanup ; exit 130" SIGINT
trap "do_cleanup ; exit 143" SIGTERM
iterations=0
# Special case: -I 0 means iterate forever (until failure)
while [ "$max_iterations" -eq 0 ] || [ $iterations -lt "$max_iterations" ] ; do
	iterations=$((iterations + 1))

	# Only print iteration banners when actually iterating (-I)
	if [ "$max_iterations" -ne 1 ] ; then
		echo
		echo "##################################################"
		echo "ITERATION ${iterations}"
		echo "##################################################"
		echo
	fi

	run_tests "$@"
	status=$?

	# -I implies exit_on_fail, so any failure ends the iterating
	if [ $status -ne 0 ] ; then
		break
	fi
done
# Print the summary table, unless suppressed (-N) or the run was
# aborted by -e after a failure (the table would be incomplete).
if $with_summary ; then
	if [ "$status" -eq 0 ] || ! $exit_on_fail ; then
		echo
		cat "$summary_file"

		echo
		tests_run=$((tests_total - tests_skipped))
		printf '%d/%d tests passed' $tests_passed $tests_run
		if [ $tests_skipped -gt 0 ] ; then
			printf ' (%d skipped)' $tests_skipped
		fi
		printf '\n'
	fi

	rm -f "$summary_file"

	echo
fi

do_cleanup

# With -H or -e propagate the last test's status; otherwise report
# overall failure iff any test failed.
if $no_header || $exit_on_fail ; then
	exit "$status"
elif [ $tests_failed -gt 0 ] ; then
	exit 1
else
	exit 0
fi