# Hey Emacs, this is a -*- shell-script -*- !!! :-)

# Pull in common test infrastructure (die, logging helpers, etc.).
. "${TEST_SCRIPTS_DIR}/common.sh"

######################################################################
# Use the remote test wrapper when driving a real (remote) cluster;
# otherwise use the wrapper that lives beside the test scripts.
if [ -n "$CTDB_TEST_REMOTE_DIR" ] ; then
    CTDB_TEST_WRAPPER="${CTDB_TEST_REMOTE_DIR}/test_wrap"
else
    # Resolve TEST_SCRIPTS_DIR to an absolute path first.
    _d=$(cd ${TEST_SCRIPTS_DIR}; echo $PWD)
    CTDB_TEST_WRAPPER="$_d/test_wrap"
fi
export CTDB_TEST_WRAPPER
# If $VALGRIND is set then use it whenever ctdb is called, but only if
# $CTDB is not already set.
[ -n "$CTDB" ] || export CTDB="${VALGRIND}${VALGRIND:+ }ctdb"

# Prefer test-local helper scripts over any system-installed ones.
PATH="${TEST_SCRIPTS_DIR}:${PATH}"
######################################################################

# Exit handler for test runs: record the test's final status, run any
# registered cleanup hooks and leave the cluster in a sane state.
# NOTE(review): reconstructed from a corrupted source - the function
# header and some statements were missing; verify against git history.
ctdb_test_exit ()
{
    local status=$?

    trap - 0

    # Remember the first failure seen; testfailures wins over $status.
    [ $(($testfailures+0)) -eq 0 -a $status -ne 0 ] && testfailures=$status
    status=$(($testfailures+0))

    # Avoid making a test fail from this point onwards.  The test is
    # now complete.
    set +e

    echo "*** TEST COMPLETED (RC=$status) AT $(date '+%F %T'), CLEANING UP..."

    eval "$ctdb_test_exit_hook" || true
    unset ctdb_test_exit_hook

    if $ctdb_test_restart_scheduled || ! cluster_is_healthy ; then
        restart_ctdb
    else
        # This could be made unconditional but then we might get
        # duplication from the recovery in restart_ctdb.  We want to
        # leave the recovery in restart_ctdb so that future tests that
        # might do a manual restart mid-test will benefit.
        echo "Forcing a recovery..."
        onnode 0 $CTDB recover
    fi

    exit $status
}
# Append a command to the cleanup hook string that ctdb_test_exit
# evals on exit.  Multiple hooks are separated by " ; ".
ctdb_test_exit_hook_add ()
{
    ctdb_test_exit_hook="${ctdb_test_exit_hook}${ctdb_test_exit_hook:+ ; }$*"
}
# Name of the currently-running test script, for log messages.
scriptname=$(basename "$0")
# Set to true (via ctdb_restart_when_done) to force a cluster restart
# when the test exits.
ctdb_test_restart_scheduled=false

# Ensure ctdb_test_exit runs no matter how the test terminates.
trap "ctdb_test_exit" 0
########################################

# Run a command on a node (or nodes) via onnode, capturing combined
# stdout/stderr in the global $out.
# Usage: try_command_on_node [-v] [onnode-options] <nodespec> <command...>
# "-v" echoes the output; any other leading "-" option is passed
# through to onnode.  Prints a diagnostic and returns non-zero if
# onnode fails.
try_command_on_node ()
{
    local nodespec="$1" ; shift

    local verbose=false
    local onnode_opts=""

    # Consume leading options until the real node specification.
    while [ "${nodespec#-}" != "$nodespec" ] ; do
        if [ "$nodespec" = "-v" ] ; then
            verbose=true
        else
            onnode_opts="${onnode_opts}${onnode_opts:+ }${nodespec}"
        fi
        nodespec="$1" ; shift
    done

    local cmd="$*"

    out=$(onnode -q $onnode_opts "$nodespec" "$cmd" 2>&1) || {
        echo "Failed to execute \"$cmd\" on node(s) \"$nodespec\""
        echo "$out"
        return 1
    }

    if $verbose ; then
        echo "Output of \"$cmd\":"
        echo "$out"
    fi
}
# Check that command output ($3) has at least $1 lines and that every
# line matches extended regexp $2.  Prints a diagnosis; returns
# non-zero on failure.
sanity_check_output ()
{
    local min_lines="$1"
    local regexp="$2" # Should be anchored as necessary.
    local output="$3"

    local ret=0

    local num_lines=$(echo "$output" | wc -l)
    echo "There are $num_lines lines of output"
    if [ $num_lines -lt $min_lines ] ; then
        echo "BAD: that's less than the required number (${min_lines})"
        ret=1
    fi

    local status=0
    local unexpected # local doesn't pass through status of command on RHS.
    unexpected=$(echo "$output" | egrep -v "$regexp") || status=$?

    # Note that this is reversed: egrep -v succeeding means there WERE
    # lines that failed to match $regexp.
    if [ $status -eq 0 ] ; then
        echo "BAD: unexpected lines in output:"
        echo "$unexpected" | cat -A
        ret=1
    else
        echo "Output lines look OK"
    fi

    return $ret
}
# Sanity check a list of "ip node" lines ($1): the IPs must be spread
# over more than one node and no node may be -1 (unassigned).
# NOTE(review): function header reconstructed from corrupted source.
sanity_check_ips ()
{
    local ips="$1" # list of "ip node" lines

    echo "Sanity checking IPs..."

    local x ipp prev
    prev=""
    while read x ipp ; do
        [ "$ipp" = "-1" ] && break
        if [ -n "$prev" -a "$ipp" != "$prev" ] ; then
            echo "OK"
            return 0
        fi
        prev="$ipp"
    done <<<"$ips"

    echo "BAD: a node was -1 or IPs are only assigned to one node"
    echo "Are you running an old version of CTDB?"
    return 1
}
# This returns a list of "ip node" lines in $out
all_ips_on_node ()
{
    local node="$1"
    try_command_on_node $node \
        "$CTDB ip -Y | awk -F: 'NR > 1 { print \$2, \$3 }'"
}
# Select a node that has public IPs assigned: record it in $test_node,
# its IPs (space-separated) in $test_node_ips and the first IP in
# $test_ip.  Returns non-zero if no suitable node was found.
_select_test_node_and_ips ()
{
    try_command_on_node any \
        "$CTDB ip -Y -n all | awk -F: 'NR > 1 { print \$2, \$3 }'"

    test_node=""  # this matches no PNN
    test_node_ips=""
    local ip pnn
    while read ip pnn ; do
        # First node seen with a valid PNN becomes the test node.
        if [ -z "$test_node" -a "$pnn" != "-1" ] ; then
            test_node="$pnn"
        fi
        if [ "$pnn" = "$test_node" ] ; then
            test_node_ips="${test_node_ips}${test_node_ips:+ }${ip}"
        fi
    done <<<"$out" # bashism to avoid problem setting variable in pipeline.

    echo "Selected node ${test_node} with IPs: ${test_node_ips}."
    test_ip="${test_node_ips%% *}"

    [ -n "$test_node" ] || return 1
}
# Retry _select_test_node_and_ips until it succeeds or the attempts
# are exhausted.
select_test_node_and_ips ()
{
    # NOTE(review): initial attempt count reconstructed from corrupted
    # source - confirm against git history.
    local timeout=10
    while ! _select_test_node_and_ips ; do
        echo "Unable to find a test node with IPs assigned"
        if [ $timeout -le 0 ] ; then
            echo "BAD: Too many attempts"
            return 1
        fi
        sleep_for 1
        timeout=$(($timeout - 1))
    done

    return 0
}
# Work out the interface ($iface) and netmask ($mask) for $test_ip on
# $test_node, then report them.
get_test_ip_mask_and_iface ()
{
    # Find the interface hosting $test_ip.
    try_command_on_node $test_node "$CTDB ip -v -Y | awk -F: -v ip=$test_ip '\$2 == ip { print \$4 }'"
    iface="$out"

    if [ -z "$TEST_LOCAL_DAEMONS" ] ; then
        # Find the netmask from the live interface configuration.
        try_command_on_node $test_node ip addr show to $test_ip
        mask="${out##*/}"
        mask="${mask%% *}"
    else
        # NOTE(review): the local-daemons branch was lost in the
        # corrupted source; a fixed /24 is assumed here - confirm
        # against git history.
        mask="24"
    fi

    echo "$test_ip/$mask is on $iface"
}
#######################################

# Wait until either timeout expires or command succeeds.  The command
# will be tried once per second, unless timeout has format T/I, where
# I is the recheck interval.  A leading "!" negates the condition.
# NOTE(review): function header reconstructed from corrupted source.
wait_until ()
{
    local timeout="$1" ; shift # "$@" is the command...

    # Optional T/I format: split out the recheck interval.
    local interval=1
    if [ "${timeout#*/}" != "$timeout" ] ; then
        interval="${timeout#*/}"
        timeout="${timeout%/*}"
    fi

    local negate=false
    if [ "$1" = "!" ] ; then
        negate=true
        shift
    fi

    echo -n "<${timeout}|"
    local t=$timeout
    while [ $t -gt 0 ] ; do
        local rc=0
        "$@" || rc=$?
        if { ! $negate && [ $rc -eq 0 ] ; } || \
            { $negate && [ $rc -ne 0 ] ; } ; then
            echo "|$(($timeout - $t))|"
            echo "OK"
            return 0
        fi
        local i
        for i in $(seq 1 $interval) ; do
            echo -n .
        done
        t=$(($t - $interval))
        sleep $interval
    done

    echo "*TIMEOUT*"

    return 1
}
# Sleep for $1 seconds, printing a progress dot per second.
# NOTE(review): reconstructed; function name inferred from callers in
# this library - confirm against git history.
sleep_for ()
{
    echo -n "=${1}|"
    local i
    for i in $(seq 1 $1) ; do
        echo -n '.'
        sleep 1
    done
    echo '|'
}
# True if all nodes report a healthy node status.
_cluster_is_healthy ()
{
    $CTDB nodestatus all >/dev/null
}

# True if the cluster is not in recovery mode.
_cluster_is_recovered ()
{
    node_has_status all recovered
}

# Healthy AND recovered.
_cluster_is_ready ()
{
    _cluster_is_healthy && _cluster_is_recovered
}
# Report (and return) overall cluster health, dumping debug
# information when the cluster is unexpectedly unhealthy.
cluster_is_healthy ()
{
    if onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
        echo "Cluster is HEALTHY"
        if ! onnode 0 $CTDB_TEST_WRAPPER _cluster_is_recovered ; then
            echo "WARNING: cluster in recovery mode!"
        fi
        return 0
    else
        echo "Cluster is UNHEALTHY"
        if ! ${ctdb_test_restart_scheduled:-false} ; then
            echo "DEBUG AT $(date '+%F %T'):"
            local i
            for i in "onnode -q 0 $CTDB status" "onnode -q 0 onnode all $CTDB scriptstatus" ; do
                echo "$i"
                $i || true
            done
        fi
        return 1
    fi
}
# Wait (default 120s) until the cluster is healthy and recovered.
# NOTE(review): function header reconstructed from corrupted source.
wait_until_ready ()
{
    local timeout="${1:-120}"

    echo "Waiting for cluster to become ready..."

    wait_until $timeout onnode -q any $CTDB_TEST_WRAPPER _cluster_is_ready
}
# This function is becoming nicely overloaded.  Soon it will collapse!  :-)
# Check whether node $1 has status $2 (healthy, banned, frozen, ...).
# Status keywords map either to a glob pattern over the flag fields of
# "ctdb -Y status" output ($bits) or to a regexp over the output of
# some other ctdb command ($fpat/$mpat/$rpat).
node_has_status ()
{
    local pnn="$1"
    local status="$2"

    local bits fpat mpat rpat
    case "$status" in
        (unhealthy)    bits="?:?:?:1:*" ;;
        (healthy)      bits="?:?:?:0:*" ;;
        (disconnected) bits="1:*" ;;
        (connected)    bits="0:*" ;;
        (banned)       bits="?:1:*" ;;
        (unbanned)     bits="?:0:*" ;;
        (disabled)     bits="?:?:1:*" ;;
        (enabled)      bits="?:?:0:*" ;;
        (stopped)      bits="?:?:?:?:1:*" ;;
        (notstopped)   bits="?:?:?:?:0:*" ;;
        (frozen)       fpat='^[[:space:]]+frozen[[:space:]]+1$' ;;
        (unfrozen)     fpat='^[[:space:]]+frozen[[:space:]]+0$' ;;
        (monon)        mpat='^Monitoring mode:ACTIVE \(0\)$' ;;
        (monoff)       mpat='^Monitoring mode:DISABLED \(1\)$' ;;
        (recovered)    rpat='^Recovery mode:RECOVERY \(1\)$' ;;
        *)
            echo "node_has_status: unknown status \"$status\""
            return 1
    esac

    if [ -n "$bits" ] ; then
        local out x line

        out=$($CTDB -Y status 2>&1) || return 1

        {
            read x # skip the header line
            while read line ; do
                # This needs to be done in 2 steps to avoid false matches.
                local line_bits="${line#:${pnn}:*:}"
                [ "$line_bits" = "$line" ] && continue
                [ "${line_bits#${bits}}" != "$line_bits" ] && return 0
            done
            return 1
        } <<<"$out" # Yay bash!
    elif [ -n "$fpat" ] ; then
        $CTDB statistics -n "$pnn" | egrep -q "$fpat"
    elif [ -n "$mpat" ] ; then
        $CTDB getmonmode -n "$pnn" | egrep -q "$mpat"
    elif [ -n "$rpat" ] ; then
        # Reversed: "recovered" means NOT in recovery mode.
        ! $CTDB status -n "$pnn" | egrep -q "$rpat"
    else
        echo 'node_has_status: unknown mode, neither $bits nor $fpat is set'
        return 1
    fi
}
# Wait (default 30s, checking via proxy node $4, default "any") until
# node $1 has status $2, dumping status output on failure.
wait_until_node_has_status ()
{
    local pnn="$1"
    local status="$2"
    local timeout="${3:-30}"
    local proxy_pnn="${4:-any}"

    echo "Waiting until node $pnn has status \"$status\"..."

    if ! wait_until $timeout onnode $proxy_pnn $CTDB_TEST_WRAPPER node_has_status "$pnn" "$status" ; then
        local i
        for i in "onnode -q any $CTDB status" "onnode -q any onnode all $CTDB scriptstatus" ; do
            echo "$i"
            $i || true
        done

        return 1
    fi
}
# Useful for superficially testing IP failover.
# IPs must be on the given node.
# If the first argument is '!' then the IPs must not be on the given node.
# NOTE(review): function header reconstructed from corrupted source.
ips_are_on_node ()
{
    local negating=false
    if [ "$1" = "!" ] ; then
        negating=true ; shift
    fi

    local node="$1" ; shift
    local ips="$*"

    local out

    all_ips_on_node $node

    local check
    for check in $ips ; do
        local ip pnn
        while read ip pnn ; do
            if [ "$check" = "$ip" ] ; then
                if [ "$pnn" = "$node" ] ; then
                    if $negating ; then return 1 ; fi
                else
                    if ! $negating ; then return 1 ; fi
                fi
                ips="${ips/${ip}}" # Remove from list
                break
            fi
        done <<<"$out" # bashism to avoid problem setting variable in pipeline.
        # If we're negating and we didn't see the address then it
        # isn't hosted by anyone!
        if $negating ; then
            ips="${ips/${check}}"
        fi
    done

    ips="${ips// }" # Remove any spaces.

    # Success when every requested IP has been accounted for.
    [ -z "$ips" ]
}
# Wait up to 60s for ips_are_on_node "$@" to succeed, printing a
# human-readable description of what is being waited for.
wait_until_ips_are_on_node ()
{
    # Go to some trouble to print a use description of what is happening
    # NOTE(review): "no longer " wording reconstructed - confirm
    # against git history.
    local not=""
    if [ "$1" == "!" ] ; then
        not="no longer "
    fi
    local node=""
    local ips=""
    local i
    for i in "$@" ; do
        [ "$i" != "!" ] || continue
        if [ -z "$node" ] ; then
            node="$i"
            continue
        fi
        ips="${ips}${ips:+, }${i}"
    done
    echo "Waiting for ${ips} to ${not}be assigned to node ${node}"

    wait_until 60 ips_are_on_node "$@"
}
# True if node $1 hosts at least one public IP.
# NOTE(review): function header reconstructed from corrupted source.
node_has_some_ips ()
{
    local node="$1"

    local out

    all_ips_on_node $node

    local ip pnn
    while read ip pnn ; do
        if [ "$node" = "$pnn" ] ; then
            return 0
        fi
    done <<<"$out" # bashism to avoid problem setting variable in pipeline.

    return 1
}
# Wait up to 60s for the node given in "$@" to host some public IPs.
wait_until_node_has_some_ips ()
{
    echo "Waiting for node to have some IPs..."

    wait_until 60 node_has_some_ips "$@"
}
#######################################

# Translate ctdbd option hacks into environment variables understood
# by the ctdb init script.
_ctdb_hack_options ()
{
    local ctdb_options="$*"

    case "$ctdb_options" in
        *--start-as-stopped*)
            export CTDB_START_AS_STOPPED="yes"
    esac
}
# Restart the ctdb service on the local node, applying any option
# hacks first.  Uses the platform-appropriate service mechanism.
# NOTE(review): the Red Hat branch was lost in the corrupted source;
# "service ctdb restart" assumed - confirm against git history.
restart_ctdb_1 ()
{
    _ctdb_hack_options "$@"

    if [ -e /etc/redhat-release ] ; then
        service ctdb restart
    else
        /etc/init.d/ctdb restart
    fi
}
# Restart CTDB on all nodes.  Override for local daemons.
_restart_ctdb_all ()
{
    onnode -p all $CTDB_TEST_WRAPPER restart_ctdb_1 "$@"
}
520 # Nothing needed for a cluster. Override for local daemons.
# Restart CTDB on all nodes (up to 5 attempts) and wait for the
# cluster to stabilise.  On total failure, dump status and force the
# calling test to fail.
# NOTE(review): reconstructed from a heavily corrupted source -
# verify details (sleep durations, retry structure) against git
# history.
restart_ctdb ()
{
    # "$@" is passed to restart_ctdb_all.

    echo -n "Restarting CTDB"
    if $ctdb_test_restart_scheduled ; then
        echo -n " (scheduled)"
    fi
    echo "..."

    local i
    for i in $(seq 1 5) ; do
        _restart_ctdb_all "$@" || {
            echo "Restart failed.  Trying again in a few seconds..."
            sleep_for 5
            continue
        }

        wait_until_ready || {
            echo "Cluster didn't become ready.  Restarting..."
            continue
        }

        echo "Setting RerecoveryTimeout to 1"
        onnode -pq all "$CTDB setvar RerecoveryTimeout 1"

        # In recent versions of CTDB, forcing a recovery like this
        # blocks until the recovery is complete.  Hopefully this will
        # help the cluster to stabilise before a subsequent test.
        echo "Forcing a recovery..."
        onnode -q 0 $CTDB recover
        sleep_for 2

        if ! onnode -q any $CTDB_TEST_WRAPPER _cluster_is_recovered ; then
            echo "Cluster has gone into recovery again, waiting..."
            wait_until 30/2 onnode -q any $CTDB_TEST_WRAPPER _cluster_is_recovered
        fi

        # Cluster is still healthy.  Good, we're done!
        if ! onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
            echo "Cluster became UNHEALTHY again [$(date)]"
            onnode -p all ctdb status -Y 2>&1
            onnode -p all ctdb scriptstatus 2>&1
            continue
        fi

        echo "Doing a sync..."
        onnode -q 0 $CTDB sync

        echo "ctdb is ready"
        return 0
    done

    echo "Cluster UNHEALTHY...  too many attempts..."
    onnode -p all ctdb status -Y 2>&1
    onnode -p all ctdb scriptstatus 2>&1

    # Try to make the calling test fail
    status=1
    return 1
}
590 # Does nothing on cluster and should be overridden for local daemons
# Schedule a cluster restart when the current test exits (consumed by
# ctdb_test_exit).
ctdb_restart_when_done ()
{
    ctdb_test_restart_scheduled=true
}
# Print the value of ctdbd command-line option --$2 on node $1, by
# inspecting the running daemon's argument list via ps.
get_ctdbd_command_line_option ()
{
    local pnn="$1"
    local option="$2"

    try_command_on_node "$pnn" "$CTDB getpid" || \
        die "Unable to get PID of ctdbd on node $pnn"

    local pid="${out#*:}"
    try_command_on_node "$pnn" "ps -p $pid -o args hww" || \
        die "Unable to get command-line of PID $pid"

    # Strip everything up to and including --option
    local t="${out#*--${option}}"
    # Strip leading '=' or space if present
    t="${t#=}"
    t="${t# }"
    # Strip any following options and print
    echo "${t%% -*}"
}
#######################################

# Wait (up to 120s) until a monitor event has occurred on node $1,
# detected by the node's scriptstatus output changing.
wait_for_monitor_event ()
{
    local pnn="$1"

    echo "Waiting for a monitor event on node ${pnn}..."

    try_command_on_node "$pnn" $CTDB scriptstatus || {
        echo "Unable to get scriptstatus from node $pnn"
        return 1
    }

    # $pnn and this baseline are inherited by the helper via bash
    # dynamic scoping.
    local ctdb_scriptstatus_original="$out"
    wait_until 120 _ctdb_scriptstatus_changed
}
# Helper for wait_for_monitor_event: true once the scriptstatus output
# differs from $ctdb_scriptstatus_original ($pnn and the baseline are
# inherited from the caller via bash dynamic scoping).
_ctdb_scriptstatus_changed ()
{
    try_command_on_node "$pnn" $CTDB scriptstatus || {
        echo "Unable to get scriptstatus from node $pnn"
        return 1
    }

    [ "$out" != "$ctdb_scriptstatus_original" ]
}
#######################################

# Set up an NFS mount of the first export on $test_ip and create a
# scratch directory in it.  Sets $nfs_test_dir, $nfs_mnt_d,
# $nfs_local_file and $nfs_remote_file; registers nfs_test_cleanup as
# an exit hook.
# NOTE(review): function header reconstructed from corrupted source.
nfs_test_setup ()
{
    select_test_node_and_ips

    nfs_first_export=$(showmount -e $test_ip | sed -n -e '2s/ .*//p')

    echo "Creating test subdirectory..."
    try_command_on_node $test_node "mktemp -d --tmpdir=$nfs_first_export"
    nfs_test_dir="$out"
    try_command_on_node $test_node "chmod 777 $nfs_test_dir"

    nfs_mnt_d=$(mktemp -d)
    nfs_local_file="${nfs_mnt_d}/${nfs_test_dir##*/}/TEST_FILE"
    nfs_remote_file="${nfs_test_dir}/TEST_FILE"

    ctdb_test_exit_hook_add nfs_test_cleanup

    echo "Mounting ${test_ip}:${nfs_first_export} on ${nfs_mnt_d} ..."
    mount -o timeo=1,hard,intr,vers=3 \
        ${test_ip}:${nfs_first_export} ${nfs_mnt_d}
}
# Undo nfs_test_setup: remove the test file, unmount and clean up the
# scratch directories on both client and server.
# NOTE(review): function header reconstructed from corrupted source.
nfs_test_cleanup ()
{
    rm -f "$nfs_local_file"
    umount -f "$nfs_mnt_d"

    onnode -q $test_node rmdir "$nfs_test_dir"
    rmdir "$nfs_mnt_d"
}
#######################################

# $1: pnn, $2: DB name
# Print the on-disk path of database $2 on node $1.
# NOTE(review): function header reconstructed from corrupted source.
db_get_path ()
{
    try_command_on_node -v $1 $CTDB getdbstatus "$2" |
        sed -n -e "s@^path: @@p"
}
# $1: pnn, $2: DB name
# Count real records in database $2 on node $1, excluding the
# __db_sequence_number__ bookkeeping record.
db_ctdb_cattdb_count_records ()
{
    try_command_on_node -v $1 $CTDB cattdb "$2" |
        grep '^key' |
        grep -v '__db_sequence_number__' |
        wc -l
}
# $1: pnn, $2: DB name, $3: key string, $4: value string, $5: RSN (default 7)
# Store a record directly into the local tdb of database $2 on node $1
# via "ctdb tstore".
db_ctdb_tstore ()
{
    local _tdb
    _tdb=$(db_get_path $1 "$2")

    local _rsn="${5:-7}"
    try_command_on_node $1 $CTDB tstore "$_tdb" "$3" "$4" "$_rsn"
}
# $1: pnn, $2: DB name, $3: dbseqnum (must be < 255!!!!!)
# Overwrite the database sequence number record of database $2.
db_ctdb_tstore_dbseqnum ()
{
    # "__db_sequence_number__" + trailing 0x00
    local _key='0x5f5f64625f73657175656e63655f6e756d6265725f5f00'

    # Construct 8 byte (unit64_t) database sequence number.  This
    # probably breaks if $3 > 255
    local _value=$(printf "0x%02x%014x" $3 0)

    db_ctdb_tstore $1 "$2" "$_key" "$_value"
}
720 #######################################
722 # Make sure that $CTDB is set.
725 local="${TEST_SUBDIR}/scripts/local.bash"
726 if [ -r "$local" ] ; then