smbd: brl_lock_cancel does not need "blr" anymore
[Samba.git] / ctdb / tests / complex / 43_failover_nfs_basic.sh
blob a68f7db6811eda8e14f0257c990f496ef442d0cc
#!/bin/bash

# Print the human-readable test description consumed by the CTDB test
# harness.  (The scrape had lost the function braces and prefixed every
# line with its original line number, which is not valid shell.)
test_info()
{
    cat <<EOF
Verify that a mounted NFS share is still operational after failover.

We mount an NFS share from a node, write a file via NFS and then
confirm that we can correctly read the file after a failover.

Prerequisites:

* An active CTDB cluster with at least 2 nodes with public addresses.

* Test must be run on a real or virtual cluster rather than against
  local daemons.

* Test must not be run from a cluster node.

Steps:

1. Verify that the cluster is healthy.
2. Select a public address and its corresponding node.
3. Select the 1st NFS share exported on the node.
4. Mount the selected NFS share.
5. Create a file in the NFS mount and calculate its checksum.
6. Disable the selected node.
7. Read the file and calculate its checksum.
8. Compare the checksums.

Expected results:

* When a node is disabled the public address fails over and it is
  possible to correctly read a file over NFS. The checksums should be
  the same before and after.
EOF
}
# Pull in the CTDB integration test harness; it provides ctdb_test_init,
# ctdb_test_check_real_cluster, cluster_is_healthy, nfs_test_setup,
# try_command_on_node, wait_until_node_has_status, the gratarp helpers
# and $CTDB.
. "${TEST_SCRIPTS_DIR}/integration.bash"

# Abort on the first failing command.
set -e

ctdb_test_init "$@"

# This test exercises real IP failover, so it must run against a real or
# virtual cluster, never against local daemons.
ctdb_test_check_real_cluster

cluster_is_healthy

# Reset configuration
ctdb_restart_when_done

# Pick a public address/node and mount its 1st NFS export.
# NOTE(review): presumably this sets $test_node and $nfs_local_file,
# which the steps below rely on -- confirm against integration.bash.
nfs_test_setup
# Write 1 KiB of random data into the NFS mount and record its checksum
# before the failover.
echo "Create file containing random data..."
dd if=/dev/urandom of="$nfs_local_file" bs=1k count=1
original_sum=$(sum "$nfs_local_file")
# Redundant under "set -e" but kept as an explicit success check.
[ $? -eq 0 ]

# Start capturing gratuitous ARPs so we can show the address moved.
gratarp_sniff_start

echo "Disabling node $test_node"
try_command_on_node 0 $CTDB disable -n $test_node
wait_until_node_has_status $test_node disabled

gratarp_sniff_wait_show

# Re-read the file through the (now failed-over) mount and checksum it.
new_sum=$(sum "$nfs_local_file")
[ $? -eq 0 ]
70 if [ "$original_md5" = "$new_md5" ] ; then
71 echo "GOOD: file contents unchanged after failover"
72 else
73 echo "BAD: file contents are different after failover"
74 testfailures=1