ctdb-tests: Make all_ips_on_node() do what it should
ctdb/tests/scripts/integration.bash

# Hey Emacs, this is a -*- shell-script -*- !!!  :-)

. "${TEST_SCRIPTS_DIR}/common.sh"

######################################################################

export CTDB_TIMEOUT=60

if [ -n "$CTDB_TEST_REMOTE_DIR" ] ; then
    CTDB_TEST_WRAPPER="${CTDB_TEST_REMOTE_DIR}/test_wrap"
else
    _d=$(cd ${TEST_SCRIPTS_DIR}; echo $PWD)
    CTDB_TEST_WRAPPER="$_d/test_wrap"
fi
export CTDB_TEST_WRAPPER

# If $VALGRIND is set then use it whenever ctdb is called, but only if
# $CTDB is not already set.
[ -n "$CTDB" ] || export CTDB="${VALGRIND}${VALGRIND:+ }ctdb"

# why???
PATH="${TEST_SCRIPTS_DIR}:${PATH}"

######################################################################

ctdb_test_exit ()
{
    local status=$?

    trap - 0

    [ $(($testfailures+0)) -eq 0 -a $status -ne 0 ] && testfailures=$status
    status=$(($testfailures+0))

    # Avoid making a test fail from this point onwards.  The test is
    # now complete.
    set +e

    echo "*** TEST COMPLETED (RC=$status) AT $(date '+%F %T'), CLEANING UP..."

    eval "$ctdb_test_exit_hook" || true
    unset ctdb_test_exit_hook

    if $ctdb_test_restart_scheduled || ! cluster_is_healthy ; then
        restart_ctdb
    else
        # This could be made unconditional but then we might get
        # duplication from the recovery in restart_ctdb.  We want to
        # leave the recovery in restart_ctdb so that future tests that
        # might do a manual restart mid-test will benefit.
        echo "Forcing a recovery..."
        onnode 0 $CTDB recover
    fi

    exit $status
}

ctdb_test_exit_hook_add ()
{
    ctdb_test_exit_hook="${ctdb_test_exit_hook}${ctdb_test_exit_hook:+ ; }$*"
}

ctdb_test_init ()
{
    scriptname=$(basename "$0")
    testfailures=0
    ctdb_test_restart_scheduled=false

    trap "ctdb_test_exit" 0
}
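
# Usage sketch (illustrative only, not executed when this file is
# sourced): a test script is expected to source this library, call
# ctdb_test_init to install the exit trap, and register any cleanup
# via ctdb_test_exit_hook_add.  The scratch file below is hypothetical.
#
#   ctdb_test_init
#   ctdb_test_exit_hook_add "rm -f /tmp/ctdb-test-scratch"
#   ctdb_restart_when_done    # ask ctdb_test_exit to restart the cluster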

########################################

# Sets: $out
try_command_on_node ()
{
    local nodespec="$1" ; shift

    local verbose=false
    local onnode_opts=""

    while [ "${nodespec#-}" != "$nodespec" ] ; do
        if [ "$nodespec" = "-v" ] ; then
            verbose=true
        else
            onnode_opts="${onnode_opts}${onnode_opts:+ }${nodespec}"
        fi
        nodespec="$1" ; shift
    done

    local cmd="$*"

    out=$(onnode -q $onnode_opts "$nodespec" "$cmd" 2>&1) || {
        echo "Failed to execute \"$cmd\" on node(s) \"$nodespec\""
        echo "$out"
        return 1
    }

    if $verbose ; then
        echo "Output of \"$cmd\":"
        echo "$out"
    fi
}
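
# Example usage (illustrative): run a command on one node and inspect
# $out, which try_command_on_node sets as a side effect.  A leading -v
# echoes the output; other leading options are passed on to onnode.
#
#   try_command_on_node 0 "$CTDB listnodes" || exit 1
#   num_nodes=$(echo "$out" | wc -l)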

sanity_check_output ()
{
    local min_lines="$1"
    local regexp="$2" # Should be anchored as necessary.
    local output="$3"

    local ret=0

    local num_lines=$(echo "$output" | wc -l)
    echo "There are $num_lines lines of output"
    if [ $num_lines -lt $min_lines ] ; then
        echo "BAD: that's less than the required number (${min_lines})"
        ret=1
    fi

    local status=0
    local unexpected # local doesn't pass through status of command on RHS.
    unexpected=$(echo "$output" | egrep -v "$regexp") || status=$?

    # Note that the sense of this check is reversed: egrep succeeding
    # means there were lines that did NOT match $regexp.
    if [ $status -eq 0 ] ; then
        echo "BAD: unexpected lines in output:"
        echo "$unexpected" | cat -A
        ret=1
    else
        echo "Output lines look OK"
    fi

    return $ret
}
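
# Example usage (illustrative; the pattern shown is a hypothetical
# match for "Name = value" lines): require at least 5 lines of output
# from a command and check that every line matches the pattern.
#
#   try_command_on_node -v 0 "$CTDB listvars"
#   sanity_check_output 5 '^[[:alpha:]]+[[:space:]]*=[[:space:]]*[[:digit:]]+$' "$out"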

sanity_check_ips ()
{
    local ips="$1" # list of "ip node" lines

    echo "Sanity checking IPs..."

    local x ipp prev
    prev=""
    while read x ipp ; do
        [ "$ipp" = "-1" ] && break
        if [ -n "$prev" -a "$ipp" != "$prev" ] ; then
            echo "OK"
            return 0
        fi
        prev="$ipp"
    done <<<"$ips"

    echo "BAD: a node was -1 or IPs are only assigned to one node"
    echo "Are you running an old version of CTDB?"
    return 1
}

# This returns a list of "ip node" lines in $out
all_ips_on_node ()
{
    local node="$1"
    try_command_on_node $node \
        "$CTDB ip -Y | awk -F: 'NR > 1 { print \$2, \$3 }'"
}

_select_test_node_and_ips ()
{
    try_command_on_node any \
        "$CTDB ip -Y -n all | awk -F: 'NR > 1 { print \$2, \$3 }'"

    test_node=""  # this matches no PNN
    test_node_ips=""
    local ip pnn
    while read ip pnn ; do
        if [ -z "$test_node" -a "$pnn" != "-1" ] ; then
            test_node="$pnn"
        fi
        if [ "$pnn" = "$test_node" ] ; then
            test_node_ips="${test_node_ips}${test_node_ips:+ }${ip}"
        fi
    done <<<"$out" # bashism to avoid problem setting variable in pipeline.

    echo "Selected node ${test_node} with IPs: ${test_node_ips}."
    test_ip="${test_node_ips%% *}"

    [ -n "$test_node" ] || return 1
}

select_test_node_and_ips ()
{
    local timeout=10
    while ! _select_test_node_and_ips ; do
        echo "Unable to find a test node with IPs assigned"
        if [ $timeout -le 0 ] ; then
            echo "BAD: Too many attempts"
            return 1
        fi
        sleep_for 1
        timeout=$(($timeout - 1))
    done

    return 0
}
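
# Example usage (illustrative): pick a node that currently hosts public
# IPs and use the variables this sets ($test_node, $test_node_ips and
# $test_ip).
#
#   select_test_node_and_ips || exit 1
#   echo "Testing against node $test_node, which hosts $test_ip"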

# Sets: mask, iface
get_test_ip_mask_and_iface ()
{
    # Find the interface
    try_command_on_node $test_node "$CTDB ip -v -Y | awk -F: -v ip=$test_ip '\$2 == ip { print \$4 }'"
    iface="$out"

    if [ -z "$TEST_LOCAL_DAEMONS" ] ; then
        # Find the netmask
        try_command_on_node $test_node ip addr show to $test_ip
        mask="${out##*/}"
        mask="${mask%% *}"
    else
        mask="24"
    fi

    echo "$test_ip/$mask is on $iface"
}

#######################################

# Wait until either timeout expires or command succeeds.  The command
# will be tried once per second, unless timeout has format T/I, where
# I is the recheck interval.
wait_until ()
{
    local timeout="$1" ; shift # "$@" is the command...

    local interval=1
    case "$timeout" in
        */*)
            interval="${timeout#*/}"
            timeout="${timeout%/*}"
    esac

    local negate=false
    if [ "$1" = "!" ] ; then
        negate=true
        shift
    fi

    echo -n "<${timeout}|"
    local t=$timeout
    while [ $t -gt 0 ] ; do
        local rc=0
        "$@" || rc=$?
        if { ! $negate && [ $rc -eq 0 ] ; } || \
            { $negate && [ $rc -ne 0 ] ; } ; then
            echo "|$(($timeout - $t))|"
            echo "OK"
            return 0
        fi
        local i
        for i in $(seq 1 $interval) ; do
            echo -n .
        done
        t=$(($t - $interval))
        sleep $interval
    done

    echo "*TIMEOUT*"

    return 1
}
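
# Example usage (illustrative; the flag file path is hypothetical):
#
#   wait_until 60 onnode 0 test -f /tmp/some.flag                         # retry every second
#   wait_until 30/5 onnode -q any $CTDB_TEST_WRAPPER _cluster_is_ready    # retry every 5 seconds
#   wait_until 10 ! node_has_status 1 banned                              # succeed once the command fails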

sleep_for ()
{
    echo -n "=${1}|"
    for i in $(seq 1 $1) ; do
        echo -n '.'
        sleep 1
    done
    echo '|'
}

_cluster_is_healthy ()
{
    $CTDB nodestatus all >/dev/null
}

_cluster_is_recovered ()
{
    node_has_status all recovered
}

_cluster_is_ready ()
{
    _cluster_is_healthy && _cluster_is_recovered
}

cluster_is_healthy ()
{
    if onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
        echo "Cluster is HEALTHY"
        if ! onnode 0 $CTDB_TEST_WRAPPER _cluster_is_recovered ; then
            echo "WARNING: cluster in recovery mode!"
        fi
        return 0
    else
        echo "Cluster is UNHEALTHY"
        if ! ${ctdb_test_restart_scheduled:-false} ; then
            echo "DEBUG AT $(date '+%F %T'):"
            local i
            for i in "onnode -q 0 $CTDB status" "onnode -q 0 onnode all $CTDB scriptstatus" ; do
                echo "$i"
                $i || true
            done
        fi
        return 1
    fi
}

wait_until_ready ()
{
    local timeout="${1:-120}"

    echo "Waiting for cluster to become ready..."

    wait_until $timeout onnode -q any $CTDB_TEST_WRAPPER _cluster_is_ready
}

# This function is becoming nicely overloaded.  Soon it will collapse!  :-)
node_has_status ()
{
    local pnn="$1"
    local status="$2"

    local bits fpat mpat rpat
    case "$status" in
        (unhealthy)    bits="?:?:?:1:*" ;;
        (healthy)      bits="?:?:?:0:*" ;;
        (disconnected) bits="1:*" ;;
        (connected)    bits="0:*" ;;
        (banned)       bits="?:1:*" ;;
        (unbanned)     bits="?:0:*" ;;
        (disabled)     bits="?:?:1:*" ;;
        (enabled)      bits="?:?:0:*" ;;
        (stopped)      bits="?:?:?:?:1:*" ;;
        (notstopped)   bits="?:?:?:?:0:*" ;;
        (frozen)       fpat='^[[:space:]]+frozen[[:space:]]+1$' ;;
        (unfrozen)     fpat='^[[:space:]]+frozen[[:space:]]+0$' ;;
        (monon)        mpat='^Monitoring mode:ACTIVE \(0\)$' ;;
        (monoff)       mpat='^Monitoring mode:DISABLED \(1\)$' ;;
        (recovered)    rpat='^Recovery mode:RECOVERY \(1\)$' ;;
        *)
            echo "node_has_status: unknown status \"$status\""
            return 1
    esac

    if [ -n "$bits" ] ; then
        local out x line

        out=$($CTDB -Y status 2>&1) || return 1

        {
            read x
            while read line ; do
                # This needs to be done in 2 steps to avoid false matches.
                local line_bits="${line#:${pnn}:*:}"
                [ "$line_bits" = "$line" ] && continue
                [ "${line_bits#${bits}}" != "$line_bits" ] && return 0
            done
            return 1
        } <<<"$out" # Yay bash!
    elif [ -n "$fpat" ] ; then
        $CTDB statistics -n "$pnn" | egrep -q "$fpat"
    elif [ -n "$mpat" ] ; then
        $CTDB getmonmode -n "$pnn" | egrep -q "$mpat"
    elif [ -n "$rpat" ] ; then
        ! $CTDB status -n "$pnn" | egrep -q "$rpat"
    else
        echo 'node_has_status: unknown mode, neither $bits nor $fpat is set'
        return 1
    fi
}

wait_until_node_has_status ()
{
    local pnn="$1"
    local status="$2"
    local timeout="${3:-30}"
    local proxy_pnn="${4:-any}"

    echo "Waiting until node $pnn has status \"$status\"..."

    if ! wait_until $timeout onnode $proxy_pnn $CTDB_TEST_WRAPPER node_has_status "$pnn" "$status" ; then
        for i in "onnode -q any $CTDB status" "onnode -q any onnode all $CTDB scriptstatus" ; do
            echo "$i"
            $i || true
        done

        return 1
    fi
}
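
# Example usage (illustrative): disable a node, wait for the status
# change to become visible, then re-enable it and wait again.
#
#   try_command_on_node 1 $CTDB disable
#   wait_until_node_has_status 1 disabled
#   try_command_on_node 1 $CTDB enable
#   wait_until_node_has_status 1 enabled 30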

# Useful for superficially testing IP failover.
# IPs must be on the given node.
# If the first argument is '!' then the IPs must not be on the given node.
ips_are_on_node ()
{
    local negating=false
    if [ "$1" = "!" ] ; then
        negating=true ; shift
    fi

    local node="$1" ; shift
    local ips="$*"

    local out

    all_ips_on_node $node

    local check
    for check in $ips ; do
        local ip pnn
        while read ip pnn ; do
            if [ "$check" = "$ip" ] ; then
                if [ "$pnn" = "$node" ] ; then
                    if $negating ; then return 1 ; fi
                else
                    if ! $negating ; then return 1 ; fi
                fi
                ips="${ips/${ip}}" # Remove from list
                break
            fi
            # If we're negating and we didn't see the address then it
            # isn't hosted by anyone!
            if $negating ; then
                ips="${ips/${check}}"
            fi
        done <<<"$out" # bashism to avoid problem setting variable in pipeline.
    done

    ips="${ips// }" # Remove any spaces.
    [ -z "$ips" ]
}

wait_until_ips_are_on_node ()
{
    echo "Waiting for IPs to fail over..."

    wait_until 60 ips_are_on_node "$@"
}
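
# Example usage (illustrative): after disabling $test_node its public
# IPs should move elsewhere; the leading "!" negates the check.
#
#   wait_until_ips_are_on_node ! $test_node $test_node_ips
#   wait_until_ips_are_on_node $test_node $test_node_ips    # e.g. after re-enabling the node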

node_has_some_ips ()
{
    local node="$1"

    local out

    all_ips_on_node $node

    while read ip pnn ; do
        if [ "$node" = "$pnn" ] ; then
            return 0
        fi
    done <<<"$out" # bashism to avoid problem setting variable in pipeline.

    return 1
}

wait_until_node_has_some_ips ()
{
    echo "Waiting for node to have some IPs..."

    wait_until 60 node_has_some_ips "$@"
}

#######################################

_ctdb_hack_options ()
{
    local ctdb_options="$*"

    case "$ctdb_options" in
        *--start-as-stopped*)
            export CTDB_START_AS_STOPPED="yes"
    esac
}

restart_ctdb_1 ()
{
    _ctdb_hack_options "$@"

    if [ -e /etc/redhat-release ] ; then
        service ctdb restart
    else
        /etc/init.d/ctdb restart
    fi
}

# Restart CTDB on all nodes.  Override for local daemons.
_restart_ctdb_all ()
{
    onnode -p all $CTDB_TEST_WRAPPER restart_ctdb_1 "$@"
}

# Nothing needed for a cluster.  Override for local daemons.
setup_ctdb ()
{
    :
}

restart_ctdb ()
{
    # "$@" is passed to _restart_ctdb_all.

    echo -n "Restarting CTDB"
    if $ctdb_test_restart_scheduled ; then
        echo -n " (scheduled)"
    fi
    echo "..."

    local i
    for i in $(seq 1 5) ; do
        _restart_ctdb_all "$@" || {
            echo "Restart failed.  Trying again in a few seconds..."
            sleep_for 5
            continue
        }

        wait_until_ready || {
            echo "Cluster didn't become ready.  Restarting..."
            continue
        }

        echo "Setting RerecoveryTimeout to 1"
        onnode -pq all "$CTDB setvar RerecoveryTimeout 1"

        # In recent versions of CTDB, forcing a recovery like this
        # blocks until the recovery is complete.  Hopefully this will
        # help the cluster to stabilise before a subsequent test.
        echo "Forcing a recovery..."
        onnode -q 0 $CTDB recover
        sleep_for 2

        if ! onnode -q any $CTDB_TEST_WRAPPER _cluster_is_recovered ; then
            echo "Cluster has gone into recovery again, waiting..."
            wait_until 30/2 onnode -q any $CTDB_TEST_WRAPPER _cluster_is_recovered
        fi

        # Cluster is still healthy.  Good, we're done!
        if ! onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
            echo "Cluster became UNHEALTHY again [$(date)]"
            onnode -p all ctdb status -Y 2>&1
            onnode -p all ctdb scriptstatus 2>&1
            echo "Restarting..."
            continue
        fi

        echo "Doing a sync..."
        onnode -q 0 $CTDB sync

        echo "ctdb is ready"
        return 0
    done

    echo "Cluster UNHEALTHY... too many attempts..."
    onnode -p all ctdb status -Y 2>&1
    onnode -p all ctdb scriptstatus 2>&1

    # Try to make the calling test fail
    status=1
    return 1
}

# Does nothing on cluster and should be overridden for local daemons
maybe_stop_ctdb ()
{
    :
}

ctdb_restart_when_done ()
{
    ctdb_test_restart_scheduled=true
}

get_ctdbd_command_line_option ()
{
    local pnn="$1"
    local option="$2"

    try_command_on_node "$pnn" "$CTDB getpid" || \
        die "Unable to get PID of ctdbd on node $pnn"
    local pid="${out#*:}"

    try_command_on_node "$pnn" "ps -p $pid -o args hww" || \
        die "Unable to get command-line of PID $pid"

    # Strip everything up to and including --option
    local t="${out#*--${option}}"
    # Strip leading '=' or space if present
    t="${t#=}"
    t="${t# }"
    # Strip any following options and print
    echo "${t%% -*}"
}
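
# Example usage (illustrative; the option name is only an example and
# may not exist for a given ctdbd build): read the value that ctdbd on
# node 0 was started with for a particular command-line option.
#
#   value=$(get_ctdbd_command_line_option 0 "log-ringbuf-size")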

#######################################

wait_for_monitor_event ()
{
    local pnn="$1"
    local timeout=120

    echo "Waiting for a monitor event on node ${pnn}..."

    try_command_on_node "$pnn" $CTDB scriptstatus || {
        echo "Unable to get scriptstatus from node $pnn"
        return 1
    }

    local ctdb_scriptstatus_original="$out"
    wait_until $timeout _ctdb_scriptstatus_changed
}

_ctdb_scriptstatus_changed ()
{
    try_command_on_node "$pnn" $CTDB scriptstatus || {
        echo "Unable to get scriptstatus from node $pnn"
        return 1
    }

    [ "$out" != "$ctdb_scriptstatus_original" ]
}

#######################################

nfs_test_setup ()
{
    select_test_node_and_ips

    nfs_first_export=$(showmount -e $test_ip | sed -n -e '2s/ .*//p')

    echo "Creating test subdirectory..."
    try_command_on_node $test_node "mktemp -d --tmpdir=$nfs_first_export"
    nfs_test_dir="$out"
    try_command_on_node $test_node "chmod 777 $nfs_test_dir"

    nfs_mnt_d=$(mktemp -d)
    nfs_local_file="${nfs_mnt_d}/${nfs_test_dir##*/}/TEST_FILE"
    nfs_remote_file="${nfs_test_dir}/TEST_FILE"

    ctdb_test_exit_hook_add nfs_test_cleanup

    echo "Mounting ${test_ip}:${nfs_first_export} on ${nfs_mnt_d} ..."
    mount -o timeo=1,hard,intr,vers=3 \
        ${test_ip}:${nfs_first_export} ${nfs_mnt_d}
}

nfs_test_cleanup ()
{
    rm -f "$nfs_local_file"
    umount -f "$nfs_mnt_d"
    rmdir "$nfs_mnt_d"
    onnode -q $test_node rmdir "$nfs_test_dir"
}

#######################################

# $1: pnn, $2: DB name
db_get_path ()
{
    try_command_on_node -v $1 $CTDB getdbstatus "$2" |
        sed -n -e "s@^path: @@p"
}

# $1: pnn, $2: DB name
db_ctdb_cattdb_count_records ()
{
    try_command_on_node -v $1 $CTDB cattdb "$2" |
        grep '^key' | grep -v '__db_sequence_number__' |
        wc -l
}

# $1: pnn, $2: DB name, $3: key string, $4: value string, $5: RSN (default 7)
db_ctdb_tstore ()
{
    _tdb=$(db_get_path $1 "$2")
    _rsn="${5:-7}"
    try_command_on_node $1 $CTDB tstore "$_tdb" "$3" "$4" "$_rsn"
}

# $1: pnn, $2: DB name, $3: dbseqnum (must be < 255!!!!!)
db_ctdb_tstore_dbseqnum ()
{
    # "__db_sequence_number__" + trailing 0x00
    _key='0x5f5f64625f73657175656e63655f6e756d6265725f5f00'

    # Construct an 8 byte (uint64_t) database sequence number.  This
    # probably breaks if $3 > 255.
    _value=$(printf "0x%02x%014x" $3 0)

    db_ctdb_tstore $1 "$2" "$_key" "$_value"
}
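
# Example usage (illustrative; the database name is hypothetical):
# push a record and then a database sequence number into a persistent
# test database on node 0.
#
#   db_ctdb_tstore 0 "mytest_persistent.tdb" "0xabcd" "0x1234"
#   db_ctdb_tstore_dbseqnum 0 "mytest_persistent.tdb" 42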

#######################################

# Make sure that $CTDB is set.
: ${CTDB:=ctdb}

local="${TEST_SUBDIR}/scripts/local.bash"
if [ -r "$local" ] ; then
    . "$local"
fi