# Hey Emacs, this is a -*- shell-script -*- !!! :-)

. "${TEST_SCRIPTS_DIR}/common.sh"
# If we're not running on a real cluster then we need a local copy of
# ctdb (and other stuff) in $PATH and we will use local daemons.
if [ -n "$TEST_LOCAL_DAEMONS" ] ; then
    export CTDB_NODES_SOCKETS=""
    for i in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
        CTDB_NODES_SOCKETS="${CTDB_NODES_SOCKETS}${CTDB_NODES_SOCKETS:+ }${TEST_VAR_DIR}/sock.${i}"
    done
    # Use in-tree binaries if running against local daemons.
    # Otherwise CTDB needs to be installed on all nodes.
    if [ -n "$ctdb_dir" -a -d "${ctdb_dir}/bin" ] ; then
        PATH="${ctdb_dir}/bin:${PATH}"
        export CTDB_LOCK_HELPER="${ctdb_dir}/bin/ctdb_lock_helper"
    fi

    export CTDB_NODES="${TEST_VAR_DIR}/nodes.txt"
fi
######################################################################

export CTDB_TIMEOUT=60
if [ -n "$CTDB_TEST_REMOTE_DIR" ] ; then
    CTDB_TEST_WRAPPER="${CTDB_TEST_REMOTE_DIR}/test_wrap"
else
    _d=$(cd ${TEST_SCRIPTS_DIR}; echo $PWD)
    CTDB_TEST_WRAPPER="$_d/test_wrap"
fi
export CTDB_TEST_WRAPPER
# If $VALGRIND is set then use it whenever ctdb is called, but only if
# $CTDB is not already set.
[ -n "$CTDB" ] || export CTDB="${VALGRIND}${VALGRIND:+ }ctdb"

PATH="${TEST_SCRIPTS_DIR}:${PATH}"
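# For example (illustrative):
#   VALGRIND="valgrind -q"  ->  CTDB becomes "valgrind -q ctdb"
#   VALGRIND unset          ->  CTDB becomes plain "ctdb"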
######################################################################
ctdb_check_time_logs ()
{
    out=$(onnode all tail -n 20 "${TEST_VAR_DIR}/ctdb.test.time.log" 2>&1)

    if [ $? -eq 0 ] ; then
            node="${line#>> NODE: }"
            if [ -n "$ds_prev" ] && \
                [ $(($ds_curr - $ds_prev)) -ge $threshold ] ; then
                echo "Node $node had time jump of $(($ds_curr - $ds_prev))ds between $(date +'%T' -d @${ds_prev%?}) and $(date +'%T' -d @${ds_curr%?})"
        echo "Error getting time logs"
    echo "Check time sync (test client first):"
    echo "Information from test client:"
    echo "Information from cluster nodes:"
    onnode all "top -b -n 1 ; echo '/proc/slabinfo' ; cat /proc/slabinfo"
    [ $(($testfailures+0)) -eq 0 -a $status -ne 0 ] && testfailures=$status
    status=$(($testfailures+0))

    # Avoid making a test fail from this point onwards.  The test is
    # now complete.
    echo "*** TEST COMPLETED (RC=$status) AT $(date '+%F %T'), CLEANING UP..."

    if [ -z "$TEST_LOCAL_DAEMONS" -a -n "$CTDB_TEST_TIME_LOGGING" -a \
        $status -ne 0 ] ; then
        ctdb_check_time_logs
    fi

    eval "$ctdb_test_exit_hook" || true
    unset ctdb_test_exit_hook
    if $ctdb_test_restart_scheduled || ! cluster_is_healthy ; then
        restart_ctdb
    else
        # This could be made unconditional but then we might get
        # duplication from the recovery in restart_ctdb.  We want to
        # leave the recovery in restart_ctdb so that future tests that
        # might do a manual restart mid-test will benefit.
        echo "Forcing a recovery..."
        onnode 0 $CTDB recover
    fi
ctdb_test_exit_hook_add ()
{
    ctdb_test_exit_hook="${ctdb_test_exit_hook}${ctdb_test_exit_hook:+ ; }$*"
}
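# Example usage (illustrative; the NFS helpers below register their cleanup
# function this way):
#   ctdb_test_exit_hook_add nfs_test_cleanup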
scriptname=$(basename "$0")
ctdb_test_restart_scheduled=false

trap "ctdb_test_exit" 0

########################################
try_command_on_node ()
{
    local nodespec="$1" ; shift

    local verbose=false
    local onnode_opts=""

    while [ "${nodespec#-}" != "$nodespec" ] ; do
        if [ "$nodespec" = "-v" ] ; then
            verbose=true
        else
            onnode_opts="${onnode_opts}${onnode_opts:+ }${nodespec}"
        fi
        nodespec="$1" ; shift
    done

    local cmd="$*"

    out=$(onnode -q $onnode_opts "$nodespec" "$cmd" 2>&1) || {
        echo "Failed to execute \"$cmd\" on node(s) \"$nodespec\""
        echo "$out"
        return 1
    }

    if $verbose ; then
        echo "Output of \"$cmd\":"
        echo "$out"
    fi
}
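# Example usage (illustrative; after the call, "$out" holds the command output):
#   try_command_on_node 0 "$CTDB listnodes"
#   try_command_on_node -v any "$CTDB status"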
sanity_check_output ()
{
    local min_lines="$1"
    local regexp="$2" # Should be anchored as necessary.
    local output="$3"

    local ret=0

    local num_lines=$(echo "$output" | wc -l)
    echo "There are $num_lines lines of output"
    if [ $num_lines -lt $min_lines ] ; then
        echo "BAD: that's less than the required number (${min_lines})"
        ret=1
    fi

    local unexpected # local doesn't pass through status of command on RHS.
    unexpected=$(echo "$output" | egrep -v "$regexp") || status=$?

    # Note that this is reversed.
    if [ $status -eq 0 ] ; then
        echo "BAD: unexpected lines in output:"
        echo "$unexpected" | cat -A
        ret=1
    else
        echo "Output lines look OK"
    fi

    return $ret
}
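# Example usage (illustrative): require at least 1 line of output and no lines
# that fail to match the (anchored) regexp:
#   sanity_check_output 1 '^Number of nodes:[[:digit:]]+$' "$out"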
    local ips="$1" # list of "ip node" lines

    echo "Sanity checking IPs..."

    while read x ipp ; do
        [ "$ipp" = "-1" ] && break
        if [ -n "$prev" -a "$ipp" != "$prev" ] ; then

    echo "BAD: a node was -1 or IPs are only assigned to one node"
    echo "Are you running an old version of CTDB?"

# This returns a list of "ip node" lines in $out
    try_command_on_node $node "$CTDB ip -Y -n all | cut -d ':' -f1-3 | sed -e '1d' -e 's@^:@@' -e 's@:@ @g'"
_select_test_node_and_ips ()
{
    test_node=""  # this matches no PNN

    while read ip pnn ; do
        if [ -z "$test_node" -a "$pnn" != "-1" ] ; then
            test_node="$pnn"
        fi
        if [ "$pnn" = "$test_node" ] ; then
            test_node_ips="${test_node_ips}${test_node_ips:+ }${ip}"
        fi
    done <<<"$out" # bashism to avoid problem setting variable in pipeline.

    echo "Selected node ${test_node} with IPs: ${test_node_ips}."
    test_ip="${test_node_ips%% *}"

    [ -n "$test_node" ] || return 1
}
select_test_node_and_ips ()
{
    while ! _select_test_node_and_ips ; do
        echo "Unable to find a test node with IPs assigned"
        if [ $timeout -le 0 ] ; then
            echo "BAD: Too many attempts"
            return 1
        fi

        timeout=$(($timeout - 1))
    done
}
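# Example usage (illustrative): pick a test node up front, then drive commands
# at it via the variables this sets ($test_node, $test_node_ips, $test_ip):
#   select_test_node_and_ips
#   try_command_on_node $test_node "$CTDB ip"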
#######################################

# Wait until either timeout expires or command succeeds.  The command
# will be tried once per second.
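# Example usage (taken from calls elsewhere in this file):
#   wait_until 120 _cluster_is_healthy
#   wait_until 120 ! ctdb_test_eventscript_flag exists $pnn "monitor"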
wait_until ()
{
    local timeout="$1" ; shift # "$@" is the command...

    if [ "$1" = "!" ] ; then
        negate=true ; shift
    fi

    echo -n "<${timeout}|"
    while [ $t -gt 0 ] ; do
        if { ! $negate && [ $rc -eq 0 ] ; } || \
            { $negate && [ $rc -ne 0 ] ; } ; then
            echo "|$(($timeout - $t))|"
            return 0
        fi
    for i in $(seq 1 $1) ; do
_cluster_is_healthy ()
{
    $CTDB nodestatus all >/dev/null && \
        node_has_status 0 recovered
}
cluster_is_healthy ()
{
    if onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
        echo "Cluster is HEALTHY"
        return 0
    else
        echo "Cluster is UNHEALTHY"
        if ! ${ctdb_test_restart_scheduled:-false} ; then
            echo "DEBUG AT $(date '+%F %T'):"
            for i in "onnode -q 0 $CTDB status" "onnode -q 0 onnode all $CTDB scriptstatus" ; do
wait_until_healthy ()
{
    local timeout="${1:-120}"

    echo "Waiting for cluster to become healthy..."

    wait_until $timeout _cluster_is_healthy
}
# This function is becoming nicely overloaded.  Soon it will collapse!  :-)
node_has_status ()
{
    local pnn="$1"
    local status="$2"

    local bits fpat mpat rpat
    case "$status" in
        (unhealthy)    bits="?:?:?:1:*" ;;
        (healthy)      bits="?:?:?:0:*" ;;
        (disconnected) bits="1:*" ;;
        (connected)    bits="0:*" ;;
        (banned)       bits="?:1:*" ;;
        (unbanned)     bits="?:0:*" ;;
        (disabled)     bits="?:?:1:*" ;;
        (enabled)      bits="?:?:0:*" ;;
        (stopped)      bits="?:?:?:?:1:*" ;;
        (notstopped)   bits="?:?:?:?:0:*" ;;
        (frozen)       fpat='^[[:space:]]+frozen[[:space:]]+1$' ;;
        (unfrozen)     fpat='^[[:space:]]+frozen[[:space:]]+0$' ;;
        (monon)        mpat='^Monitoring mode:ACTIVE \(0\)$' ;;
        (monoff)       mpat='^Monitoring mode:DISABLED \(1\)$' ;;
        (recovered)    rpat='^Recovery mode:NORMAL \(0\)$' ;;
        *)
            echo "node_has_status: unknown status \"$status\""
            return 1
    esac
    if [ -n "$bits" ] ; then
        out=$($CTDB -Y status 2>&1) || return 1

        {
            while read line ; do
                # This needs to be done in 2 steps to avoid false matches.
                local line_bits="${line#:${pnn}:*:}"
                [ "$line_bits" = "$line" ] && continue
                [ "${line_bits#${bits}}" != "$line_bits" ] && return 0
            done
            return 1
        } <<<"$out" # Yay bash!
    elif [ -n "$fpat" ] ; then
        $CTDB statistics -n "$pnn" | egrep -q "$fpat"
    elif [ -n "$mpat" ] ; then
        $CTDB getmonmode -n "$pnn" | egrep -q "$mpat"
    elif [ -n "$rpat" ] ; then
        $CTDB status -n "$pnn" | egrep -q "$rpat"
    else
        echo 'node_has_status: unknown mode, neither $bits nor $fpat is set'
        return 1
    fi
}
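# Example usage (illustrative):
#   node_has_status 0 recovered
#   node_has_status $test_node disabled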
wait_until_node_has_status ()
{
    local pnn="$1"
    local status="$2"
    local timeout="${3:-30}"
    local proxy_pnn="${4:-any}"

    echo "Waiting until node $pnn has status \"$status\"..."

    if ! wait_until $timeout onnode $proxy_pnn $CTDB_TEST_WRAPPER node_has_status "$pnn" "$status" ; then
        for i in "onnode -q any $CTDB status" "onnode -q any onnode all $CTDB scriptstatus" ; do
# Useful for superficially testing IP failover.
# IPs must be on nodes matching nodeglob.
# If the first argument is '!' then the IPs must not be on nodes
# matching nodeglob.
ips_are_on_nodeglob ()
{
    local negating=false
    if [ "$1" = "!" ] ; then
        negating=true ; shift
    fi
    local nodeglob="$1" ; shift
    local ips="$*"

    for check in $ips ; do
        while read ip pnn ; do
            if [ "$check" = "$ip" ] ; then
                case "$pnn" in
                    ($nodeglob) if $negating ; then return 1 ; fi ;;
                    (*) if ! $negating ; then return 1 ; fi ;;
                esac
                ips="${ips/${ip}}" # Remove from list
                break
            fi
            # If we're negating and we didn't see the address then it
            # isn't hosted by anyone!
            if $negating ; then
                ips="${ips/${check}}"
            fi
        done <<<"$out" # bashism to avoid problem setting variable in pipeline.
    done

    ips="${ips// }" # Remove any spaces.
    [ -z "$ips" ]
}
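# Example usage (illustrative): check whether all of the test node's IPs are
# hosted on that node, or (negated) that none of them are:
#   ips_are_on_nodeglob "$test_node" $test_node_ips
#   ips_are_on_nodeglob ! "$test_node" $test_node_ips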
wait_until_ips_are_on_nodeglob ()
{
    echo "Waiting for IPs to fail over..."

    wait_until 60 ips_are_on_nodeglob "$@"
}
node_has_some_ips ()
{
    while read ip pnn ; do
        if [ "$node" = "$pnn" ] ; then
            return 0
        fi
    done <<<"$out" # bashism to avoid problem setting variable in pipeline.

    return 1
}

wait_until_node_has_some_ips ()
{
    echo "Waiting for node to have some IPs..."

    wait_until 60 node_has_some_ips "$@"
}
#######################################

    echo "Attempting to politely shut down daemons..."
    onnode 1 $CTDB shutdown -n all || true

    echo "Sleeping for a while..."

    local pat="ctdbd --socket=${TEST_VAR_DIR}/.* --nlist .* --nopublicipcheck"
    if pgrep -f "$pat" >/dev/null ; then
        echo "Killing remaining daemons..."
        pkill -f "$pat"

        if pgrep -f "$pat" >/dev/null ; then
            echo "Once more with feeling..."
            pkill -9 -f "$pat"
        fi
    fi
    rm -rf "${TEST_VAR_DIR}/test.db"
    mkdir -p "${TEST_VAR_DIR}/test.db/persistent"

    local public_addresses_all="${TEST_VAR_DIR}/public_addresses_all"
    local no_public_addresses="${TEST_VAR_DIR}/no_public_addresses.txt"
    rm -f $CTDB_NODES $public_addresses_all $no_public_addresses

    # If there are (strictly) greater than 2 nodes then we'll randomly
    # choose a node to have no public addresses.
    local no_public_ips=-1
    [ $TEST_LOCAL_DAEMONS -gt 2 ] && no_public_ips=$(($RANDOM % $TEST_LOCAL_DAEMONS))
    echo "$no_public_ips" >$no_public_addresses

    # When running certain tests we add and remove eventscripts, so we
    # need to be able to modify the events.d/ directory.  Therefore,
    # we use a temporary events.d/ directory under $TEST_VAR_DIR.  We
    # copy the actual test eventscript(s) in there from the original
    # events.d/ directory that sits alongside $TEST_SCRIPTS_DIR.
    local top=$(dirname "$TEST_SCRIPTS_DIR")
    local events_d="${top}/events.d"
    mkdir -p "${TEST_VAR_DIR}/events.d"
    cp -p "${events_d}/"* "${TEST_VAR_DIR}/events.d/"
    for i in $(seq 1 $TEST_LOCAL_DAEMONS) ; do
        if [ "${CTDB_USE_IPV6}x" != "x" ]; then
            echo ::$i >>"$CTDB_NODES"
            ip addr add ::$i/128 dev lo
        else
            echo 127.0.0.$i >>"$CTDB_NODES"
            # 2 public addresses on most nodes, just to make things interesting.
            if [ $(($i - 1)) -ne $no_public_ips ] ; then
                echo "192.168.234.$i/24 lo" >>"$public_addresses_all"
                echo "192.168.234.$(($i + $TEST_LOCAL_DAEMONS))/24 lo" >>"$public_addresses_all"
            fi
        fi
    done
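    # For example (illustrative), with TEST_LOCAL_DAEMONS=3 and node 2 chosen
    # as the node with no public IPs: nodes.txt gets 127.0.0.1 .. 127.0.0.3,
    # public_addresses_all gets 192.168.234.1, .2, .4 and .5 (each as a
    # "<addr>/24 lo" line) and no_public_addresses.txt contains "2".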
daemons_start_1 ()
{
    local pnn="$1"
    shift # "$@" gets passed to ctdbd

    local public_addresses_all="${TEST_VAR_DIR}/public_addresses_all"
    local public_addresses_mine="${TEST_VAR_DIR}/public_addresses.${pnn}"
    local no_public_addresses="${TEST_VAR_DIR}/no_public_addresses.txt"

    local no_public_ips=-1
    [ -r $no_public_addresses ] && read no_public_ips <$no_public_addresses

    if [ "$no_public_ips" = $pnn ] ; then
        echo "Node $no_public_ips will have no public IPs."
    fi

    local node_ip=$(sed -n -e "$(($pnn + 1))p" "$CTDB_NODES")
    local ctdb_options="--sloppy-start --reclock=${TEST_VAR_DIR}/rec.lock --nlist $CTDB_NODES --nopublicipcheck --listen=${node_ip} --event-script-dir=${TEST_VAR_DIR}/events.d --logfile=${TEST_VAR_DIR}/daemon.${pnn}.log -d 3 --log-ringbuf-size=10000 --dbdir=${TEST_VAR_DIR}/test.db --dbdir-persistent=${TEST_VAR_DIR}/test.db/persistent --dbdir-state=${TEST_VAR_DIR}/test.db/state"

    if [ $pnn -eq $no_public_ips ] ; then
        ctdb_options="$ctdb_options --public-addresses=/dev/null"
    else
        cp "$public_addresses_all" "$public_addresses_mine"
        ctdb_options="$ctdb_options --public-addresses=$public_addresses_mine"
    fi

    # We'll use "pkill -f" to kill the daemons with
    # "--socket=.* --nlist .* --nopublicipcheck" as context.
    $VALGRIND ctdbd --socket="${TEST_VAR_DIR}/sock.$pnn" $ctdb_options "$@" || return 1
}
daemons_start ()
{
    # "$@" gets passed to ctdbd
    echo "Starting $TEST_LOCAL_DAEMONS ctdb daemons..."

    for i in $(seq 0 $(($TEST_LOCAL_DAEMONS - 1))) ; do
        daemons_start_1 $i "$@"
    done
}
#######################################

_ctdb_hack_options ()
{
    local ctdb_options="$*"

    case "$ctdb_options" in
        *--start-as-stopped*)
            export CTDB_START_AS_STOPPED="yes"
    esac
}
_restart_ctdb ()
{
    _ctdb_hack_options "$@"

    if [ -e /etc/redhat-release ] ; then
        service ctdb restart
    else
        /etc/init.d/ctdb restart
    fi
}
_ctdb_start ()
{
    _ctdb_hack_options "$@"

    /etc/init.d/ctdb start
}
if [ -n "$CTDB_NODES_SOCKETS" ] ; then

# Common things to do after starting one or more nodes.
    onnode -q 1 $CTDB_TEST_WRAPPER wait_until_healthy || return 1

    echo "Setting RerecoveryTimeout to 1"
    onnode -pq all "$CTDB setvar RerecoveryTimeout 1"

    # In recent versions of CTDB, forcing a recovery like this blocks
    # until the recovery is complete.  Hopefully this will help the
    # cluster to stabilise before a subsequent test.
    echo "Forcing a recovery..."
    onnode -q 0 $CTDB recover
# This assumes that ctdbd is not running on the given node.
    shift # "$@" is passed to ctdbd start.

    echo -n "Starting CTDB on node ${pnn}..."

    if [ -n "$CTDB_NODES_SOCKETS" ] ; then
        daemons_start_1 $pnn "$@"
    else
        onnode $pnn $CTDB_TEST_WRAPPER _ctdb_start "$@"
    fi

    # If we're starting only 1 node then we're doing something weird.
    ctdb_restart_when_done
}
restart_ctdb ()
{
    # "$@" is passed to ctdbd start.
    echo -n "Restarting CTDB"
    if $ctdb_test_restart_scheduled ; then
        echo -n " (scheduled)"
    fi

    for i in $(seq 1 5) ; do
        if [ -n "$CTDB_NODES_SOCKETS" ] ; then
            daemons_start "$@"
        else
            onnode -p all $CTDB_TEST_WRAPPER _restart_ctdb "$@"
        fi

        echo "Restart failed.  Trying again in a few seconds..."
        onnode -q 1 $CTDB_TEST_WRAPPER wait_until_healthy || {
            echo "Cluster didn't become healthy.  Restarting..."
            continue
        }

        echo "Setting RerecoveryTimeout to 1"
        onnode -pq all "$CTDB setvar RerecoveryTimeout 1"

        # In recent versions of CTDB, forcing a recovery like this
        # blocks until the recovery is complete.  Hopefully this will
        # help the cluster to stabilise before a subsequent test.
        echo "Forcing a recovery..."
        onnode -q 0 $CTDB recover
        # Cluster is still healthy.  Good, we're done!
        if ! onnode 0 $CTDB_TEST_WRAPPER _cluster_is_healthy ; then
            echo "Cluster became UNHEALTHY again [$(date)]"
            onnode -p all ctdb status -Y 2>&1
            onnode -p all ctdb scriptstatus 2>&1
        fi

        echo "Doing a sync..."
        onnode -q 0 $CTDB sync
    echo "Cluster UNHEALTHY...  too many attempts..."
    onnode -p all ctdb status -Y 2>&1
    onnode -p all ctdb scriptstatus 2>&1

    # Try to make the calling test fail
    return 1
}
ctdb_restart_when_done ()
{
    ctdb_test_restart_scheduled=true
}
get_ctdbd_command_line_option ()
{
    local pnn="$1"
    local option="$2"

    try_command_on_node "$pnn" "$CTDB getpid" || \
        die "Unable to get PID of ctdbd on node $pnn"

    local pid="${out#*:}"
    try_command_on_node "$pnn" "ps -p $pid -o args hww" || \
        die "Unable to get command-line of PID $pid"

    # Strip everything up to and including --option
    local t="${out#*--${option}}"
    # Strip leading '=' or space if present
    t="${t#=}"
    t="${t# }"
    # Strip any following options and print
    echo "${t%% -*}"
}
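# Example usage (illustrative):
#   db_dir=$(get_ctdbd_command_line_option 0 "dbdir")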
#######################################

install_eventscript ()
{
    local script_name="$1"
    local script_contents="$2"

    if [ -z "$TEST_LOCAL_DAEMONS" ] ; then
        # The quoting here is *very* fragile.  However, we do
        # experience the joy of installing a short script using
        # onnode, without needing to know the IP addresses of the
        # nodes.
        onnode all "f=\"\${CTDB_BASE:-/etc/ctdb}/events.d/${script_name}\" ; echo \"Installing \$f\" ; echo '${script_contents}' > \"\$f\" ; chmod 755 \"\$f\""
    else
        f="${TEST_VAR_DIR}/events.d/${script_name}"
        echo "$script_contents" >"$f"
        chmod 755 "$f"
    fi
}
uninstall_eventscript ()
{
    local script_name="$1"

    if [ -z "$TEST_LOCAL_DAEMONS" ] ; then
        onnode all "rm -vf \"\${CTDB_BASE:-/etc/ctdb}/events.d/${script_name}\""
    else
        rm -vf "${TEST_VAR_DIR}/events.d/${script_name}"
    fi
}
#######################################

# This section deals with the 99.ctdb_test eventscript.

# Metafunctions: Handle a ctdb-test file on a node.

ctdb_test_eventscript_file_create ()
{
    local pnn="$1"
    local type="$2"

    try_command_on_node $pnn touch "/tmp/ctdb-test-${type}.${pnn}"
}

ctdb_test_eventscript_file_remove ()
{
    local pnn="$1"
    local type="$2"

    try_command_on_node $pnn rm -f "/tmp/ctdb-test-${type}.${pnn}"
}

ctdb_test_eventscript_file_exists ()
{
    local pnn="$1"
    local type="$2"

    try_command_on_node $pnn test -f "/tmp/ctdb-test-${type}.${pnn}" >/dev/null 2>&1
}

# Handle a flag file on a node that is removed by 99.ctdb_test on the
# specified event.
ctdb_test_eventscript_flag ()
{
    local cmd="$1"
    local pnn="$2"
    local event="$3"

    ctdb_test_eventscript_file_${cmd} "$pnn" "flag-${event}"
}

# Handle a trigger that causes 99.ctdb_test to fail its monitor
# event.
ctdb_test_eventscript_unhealthy_trigger ()
{
    local cmd="$1"
    local pnn="$2"

    ctdb_test_eventscript_file_${cmd} "$pnn" "unhealthy-trigger"
}

# Handle the file that 99.ctdb_test created to show that it has marked
# a node unhealthy because it detected the above trigger.
ctdb_test_eventscript_unhealthy_detected ()
{
    local cmd="$1"
    local pnn="$2"

    ctdb_test_eventscript_file_${cmd} "$pnn" "unhealthy-detected"
}

# Handle a trigger that causes 99.ctdb_test to time out its monitor
# event.  This should cause the node to be banned.
ctdb_test_eventscript_timeout_trigger ()
{
    local cmd="$1"
    local pnn="$2"
    local event="$3"

    ctdb_test_eventscript_file_${cmd} "$pnn" "${event}-timeout"
}
# Note that the eventscript can't use the above functions!
ctdb_test_eventscript_install ()
{
    local script='#!/bin/sh
rm -vf "/tmp/ctdb-test-flag-${1}.${pnn}"

trigger="/tmp/ctdb-test-unhealthy-trigger.${pnn}"
detected="/tmp/ctdb-test-unhealthy-detected.${pnn}"
timeout_trigger="/tmp/ctdb-test-${1}-timeout.${pnn}"

if [ -e "$trigger" ] ; then
    echo "${0}: Unhealthy because \"$trigger\" detected"
elif [ -e "$detected" -a ! -e "$trigger" ] ; then
    echo "${0}: Healthy again, \"$trigger\" no longer detected"

if [ -e "$timeout_trigger" ] ; then
    echo "${0}: Sleeping for a long time because \"$timeout_trigger\" detected"

    install_eventscript "99.ctdb_test" "$script"
}
ctdb_test_eventscript_uninstall ()
{
    uninstall_eventscript "99.ctdb_test"
}
# Note that this only works if you know all other monitor events will
# succeed.  You also need to install the eventscript before using it.
wait_for_monitor_event ()
{
    local pnn="$1"

    echo "Waiting for a monitor event on node ${pnn}..."
    ctdb_test_eventscript_flag create $pnn "monitor"

    wait_until 120 ! ctdb_test_eventscript_flag exists $pnn "monitor"
}
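# Example usage (illustrative):
#   ctdb_test_eventscript_install
#   wait_for_monitor_event $test_node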
#######################################

    select_test_node_and_ips

    nfs_first_export=$(showmount -e $test_ip | sed -n -e '2s/ .*//p')

    echo "Creating test subdirectory..."
    try_command_on_node $test_node "mktemp -d --tmpdir=$nfs_first_export"
    nfs_test_dir="$out"
    try_command_on_node $test_node "chmod 777 $nfs_test_dir"

    nfs_mnt_d=$(mktemp -d)
    nfs_local_file="${nfs_mnt_d}/${nfs_test_dir##*/}/TEST_FILE"
    nfs_remote_file="${nfs_test_dir}/TEST_FILE"

    ctdb_test_exit_hook_add nfs_test_cleanup

    echo "Mounting ${test_ip}:${nfs_first_export} on ${nfs_mnt_d} ..."
    mount -o timeo=1,hard,intr,vers=3 \
        ${test_ip}:${nfs_first_export} ${nfs_mnt_d}
nfs_test_cleanup ()
{
    rm -f "$nfs_local_file"
    umount -f "$nfs_mnt_d"

    onnode -q $test_node rmdir "$nfs_test_dir"
}
#######################################

# $1: pnn, $2: DB name
db_get_path ()
{
    try_command_on_node -v $1 $CTDB getdbstatus "$2" |
        sed -n -e "s@^path: @@p"
}
# $1: pnn, $2: DB name
db_ctdb_cattdb_count_records ()
{
    try_command_on_node -v $1 $CTDB cattdb "$2" |
        grep '^key' | grep -v '__db_sequence_number__' |
        wc -l
}
# $1: pnn, $2: DB name, $3: key string, $4: value string, $5: RSN (default 7)
db_ctdb_tstore ()
{
    _tdb=$(db_get_path $1 "$2")
    _rsn="${5:-7}"
    try_command_on_node $1 $CTDB tstore "$_tdb" "$3" "$4" "$_rsn"
}
# $1: pnn, $2: DB name, $3: dbseqnum (must be <= 255!)
db_ctdb_tstore_dbseqnum ()
{
    # "__db_sequence_number__" + trailing 0x00
    _key='0x5f5f64625f73657175656e63655f6e756d6265725f5f00'

    # Construct 8 byte (uint64_t) database sequence number.  This
    # probably breaks if $3 > 255
    _value=$(printf "0x%02x%014x" $3 0)
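    # For example (illustrative): $3 = 3 gives _value=0x0300000000000000,
    # i.e. an 8-byte sequence number of 3 with the low-order byte first.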
    db_ctdb_tstore $1 "$2" "$_key" "$_value"
}
#######################################

# Make sure that $CTDB is set.
: ${CTDB:=ctdb}

local="${TEST_SUBDIR}/scripts/local.bash"
if [ -r "$local" ] ; then
    . "$local"
fi