Skip to content

Commit

Permalink
Dev: run-functional-tests: "-n" option can grow more nodes
Browse files Browse the repository at this point in the history
  • Loading branch information
zzhou1 committed Sep 3, 2023
1 parent 31ab66c commit 14bfe4a
Showing 1 changed file with 110 additions and 60 deletions.
170 changes: 110 additions & 60 deletions test/run-functional-tests
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,10 @@ HA_NETWORK_V6_ARRAY[1]="2001:db8:20::/64"
BEHAVE_CASE_DIR="$(dirname $0)/features/"
BEHAVE_CASE_EXCLUDE="sbd|ocfs2"

# Global node-name arrays populated by get_cluster_new_nodes():
declare -a hanode_list_to_form_cluster
# Containers that exist but are not yet members of the running cluster.
declare -a hanode_list_new_members
# Nodes the running cluster already knows about (empty when no cluster yet).
declare -a hanode_list_current_cluster

read -r -d '' SSHD_CONFIG_AZURE << EOM
PermitRootLogin no
AuthorizedKeysFile .ssh/authorized_keys
Expand Down Expand Up @@ -105,7 +109,7 @@ check_docker_env() {
for network in ${HA_NETWORK_ARRAY[@]};do
docker network ls|grep -q "$network"
if [ "$?" -eq 0 ];then
fatal "HA specific network \"$network\" already exists"
warning "HA specific network \"$network\" already exists"
fi
done
}
Expand Down Expand Up @@ -140,22 +144,29 @@ Users can make the code change under crmsh.git including test cases. This tool w
OPTIONS:
-h, --help Show this help message and exit
-l List existing functional test cases and exit
-n NUM Only setup a cluster with NUM nodes(containers)
-l List existing functional test cases and exit
-n NUM NUM of nodes(containers) from hanode1 to hanode$NUM
-x Don't config corosync on containers(with -n option)
-d Cleanup the cluster containers
-u Create normal users, and Azure like ssh environment
-u Run the test as a normal user and enforce sshd_config to be close to that of a Public Cloud, e.g. Azure
-q Create a qnetd node(with -n and -x option)
EXAMPLES:
To launch 2 nodes with the running cluster with the very basic corosync.conf
# crmsh.git/test/run-functional-tests -n 2
To launch 2 nodes without the cluster stack running to play with "crm cluster init/join"
# crmsh.git/run-functional-tests -n 2 -x
To grow more nodes with a bigger number than '2' in the above example, and skip existing nodes
# crmsh.git/test/run-functional-tests -n 5
To launch 2 nodes without the running cluster, for use cases to play with "crm cluster init/join"
# crmsh.git/test/run-functional-tests -n 2 -x
To grow more nodes without configuring the cluster stack
# crmsh.git/test/run-functional-tests -n 7 -x
To launch 2 nodes without the cluster stack running, and a qnetd node(named 'qnetd-node')
# crmsh.git/run-functional-tests -n 2 -x -q
To launch 2 nodes without the running cluster, and a qnetd node(named 'qnetd-node')
# crmsh.git/test/run-functional-tests -n 2 -x -q
To list the existing test cases. Users could add his own new test cases.
# crmsh.git/test/run-functional-tests -l
Expand Down Expand Up @@ -212,8 +223,12 @@ deploy_ha_node() {

info "Deploying \"$node_name\"..."
docker run --restart always $docker_options $DOCKER_IMAGE &> /dev/null
if [ $? -ne 0 ]; then
warning Likely $node_name already exists.
return
fi
for network in ${HA_NETWORK_ARRAY[@]};do
docker network connect $network $node_name
docker network connect $network $node_name &> /dev/null
done

if [ "$node_name" != "qnetd-node" ];then
Expand All @@ -224,29 +239,26 @@ deploy_ha_node() {
docker_exec $node_name "echo 'StrictHostKeyChecking no' >> /etc/ssh/ssh_config"

if [ "$node_name" != "qnetd-node" ];then
docker cp $PROJECT_PATH $node_name:/opt/crmsh
info "Building crmsh on \"$node_name\"..."
docker_exec $node_name "$make_cmd" 1> /dev/null || \
docker cp $PROJECT_PATH $node_name:/opt/crmsh
info "Building crmsh on \"$node_name\"..."
docker_exec $node_name "$make_cmd" 1> /dev/null || \
fatal "Building failed on $node_name!"
docker_exec $node_name "chown hacluster:haclient -R /var/log/crmsh"
docker_exec $node_name "chmod g+w -R /var/log/crmsh"
create_alice_bob_carol
if [ "$NORMAL_USER_FLAG" -eq 1 ];then
set_sshd_config_like_in_azure $node_name
fi
docker_exec $node_name "chown hacluster:haclient -R /var/log/crmsh"
docker_exec $node_name "chmod g+w -R /var/log/crmsh"
create_alice_bob_carol
else
docker_exec $node_name "useradd -m -s /bin/bash alice 2>/dev/null"
docker_exec $node_name "echo \"alice ALL=(ALL) NOPASSWD:ALL\" > /etc/sudoers.d/alice"
docker_exec $node_name "cp -r /root/.ssh ~alice/ && chown alice:users -R ~alice/.ssh"
info "Create user 'alice' on $node_name"
[ "$NORMAL_USER_FLAG" -eq 1 ] && set_sshd_config_like_in_azure $node_name
docker_exec $node_name "useradd -m -s /bin/bash alice 2>/dev/null"
docker_exec $node_name "echo \"alice ALL=(ALL) NOPASSWD:ALL\" > /etc/sudoers.d/alice"
docker_exec $node_name "cp -r /root/.ssh ~alice/ && chown alice:users -R ~alice/.ssh"
info "Create user 'alice' on $node_name"
fi
[ "$NORMAL_USER_FLAG" -eq 1 ] && set_sshd_config_like_in_azure $node_name
}


create_node() {
info "Loading docker image $DOCKER_IMAGE..."
docker pull $DOCKER_IMAGE &> /dev/null
docker pull $DOCKER_IMAGE &> /dev/null

for index in ${!HA_NETWORK_ARRAY[@]};do
network=${HA_NETWORK_ARRAY[$index]}
Expand All @@ -260,40 +272,67 @@ create_node() {
wait
}

#######################################
# Work out which hanode containers are not yet cluster members.
# Globals written:
#   hanode_list_to_form_cluster  - all existing hanodeN containers, sorted by N
#   hanode_list_current_cluster  - nodes already in the cluster (asked of hanode1)
#   hanode_list_new_members      - containers present but not yet cluster members
# Outputs: nothing; results are left in the three arrays above.
#######################################
get_cluster_new_nodes() {
    # All hanode* containers, numerically sorted by the suffix after "hanode"
    # (char position 7); tr strips any stray carriage returns from docker_exec.
    hanode_list_to_form_cluster=($(docker ps -a --format '{{.Names}}'|grep hanode|sort -n -k1.7|tr '\r' ' '))
    # Nodes the running cluster already knows about; empty when no cluster yet
    # (all stderr suppressed so a missing/stopped hanode1 is not fatal).
    hanode_list_current_cluster=($(docker_exec hanode1 "crm node server 2>/dev/null" 2>/dev/null|sort -n -k1.7|tr '\r' ' '))
    hanode_list_new_members=()
    for element in "${hanode_list_to_form_cluster[@]}"; do
        # Literal whole-word containment test. Join with [*] (not the SC2199
        # [@]-in-[[ ]] anti-pattern) and use a glob match instead of a quoted
        # regex; node names never contain spaces, so padding with spaces makes
        # the match exact.
        if [[ " ${hanode_list_current_cluster[*]} " != *" $element "* ]]; then
            hanode_list_new_members+=("$element")
        fi
    done
}

config_cluster() {
node_num=$#
insert_str=""
container_ip_array=(`docker network inspect $HA_NETWORK_ARRAY -f '{{range .Containers}}{{printf "%s " .IPv4Address}}{{end}}'`)
get_cluster_new_nodes

for i in $(seq $node_num -1 1);do
ip=`echo ${container_ip_array[$((i-1))]}|awk -F/ '{print $1}'`
if [ ${#hanode_list_new_members[@]} -eq 0 ]; then
return
else
info ${#hanode_list_new_members[@]} new node\(s\) "'${hanode_list_new_members[@]}'"
fi

insert_str=""
for i in $(seq 1 ${#hanode_list_to_form_cluster[@]});do
node=${hanode_list_to_form_cluster[$((i-1))]}
ip=$(docker network inspect "$HA_NETWORK_ARRAY" --format '{{range .Containers}}{{if eq .Name "'"${node}"'"}}{{.IPv4Address}}{{end}}{{end}}'|awk -F/ '{print $1}')
insert_str+="\\n\\tnode {\n\t\tring0_addr: $ip\n\t\tnodeid: $i\n\t}"
done

corosync_conf_str=$(sed "/nodelist/a \\${insert_str}" <(echo "$COROSYNC_CONF_TEMPLATE"))
if [ $node_num -eq 2 ];then

if [ ${#hanode_list_to_form_cluster[@]} -eq 2 ];then
corosync_conf_str=$(sed "/corosync_votequorum/a \\\\ttwo_node: 1" <(echo "$corosync_conf_str"))
fi
docker_exec "hanode1" "echo \"$corosync_conf_str\" > $COROSYNC_CONF"
if is_container_existing "qnetd-node";then
info "Generate corosync.conf without qdevice/qnetd for the cluster hanode{1..${#hanode_list_to_form_cluster[@]}}"
else
info "Generate corosync.conf for the cluster hanode{1..${#hanode_list_to_form_cluster[@]}}"
fi

info "Copy corosync.conf to $*"
for node in $*;do
if [ $node == $1 ];then
docker_exec $1 "echo \"$corosync_conf_str\" >> $COROSYNC_CONF"
docker_exec $1 "corosync-keygen -l -k $COROSYNC_AUTH &> /dev/null"
info "Copy corosync.conf to all cluster nodes hanode{1..${#hanode_list_to_form_cluster[@]}} "
for node in ${hanode_list_to_form_cluster[@]};do
if [ $node == "hanode1" ];then
docker_exec "hanode1" "corosync-keygen -l -k $COROSYNC_AUTH &> /dev/null"
else
while :
do
docker_exec $1 "ssh -T -o Batchmode=yes $node true &> /dev/null" && break
docker_exec "hanode1" "ssh -T -o Batchmode=yes $node true &> /dev/null" && break
sleep 1
done
docker_exec $1 "scp -p $COROSYNC_CONF $COROSYNC_AUTH $node:/etc/corosync &> /dev/null"
docker_exec "hanode1" "scp -p $COROSYNC_CONF $COROSYNC_AUTH $node:/etc/corosync &> /dev/null"
fi
done
}


start_cluster() {
for node in $*;do
if [ ${#hanode_list_current_cluster[@]} -ne 0 ] && [ ${#hanode_list_new_members[@]} -ne 0 ]; then
docker_exec hanode1 "corosync-cfgtool -R > /dev/null"
info On the existing cluster hanode{1..${#hanode_list_current_cluster[@]}}: Reloading corosync.conf... Done
fi

for node in ${hanode_list_new_members[@]};do
docker_exec $node "crm cluster enable && crm cluster start" 1> /dev/null
if [ "$?" -eq 0 ];then
info "Cluster service started on \"$node\""
Expand All @@ -303,35 +342,46 @@ start_cluster() {
done
}


container_already_exists() {
docker ps -a|grep -q "$1"
if [ "$?" -eq 0 ];then
fatal "Container \"$1\" already running"
fi
# Return 0 if a container named exactly "$1" exists (running or stopped).
# -F treats the name as a literal string (no regex metacharacters),
# -x matches the whole line, and -- protects names starting with '-'.
is_container_existing() {
    docker ps -a --format '{{.Names}}' | grep -Fxq -- "$1"
}


setup_cluster() {
hanodes_arry=()
is_number $1
if [ "$?" -eq 0 ];then
for i in $(seq 1 $1);do
hanodes_arry+=("hanode$i")
done
get_cluster_new_nodes

hanodes_array=()
if is_number "$1"; then
# add more nodes after the last node, ordered by the node name
if [ ${#hanode_list_to_form_cluster[@]} -gt 0 ]; then
last_node_num="${hanode_list_to_form_cluster[-1]:6}"
warning Skip creating the existing cluster nodes: hanode{1..${#hanode_list_to_form_cluster[@]}}
else
last_node_num=0
fi
num_of_new_nodes=$(( $1 - ${#hanode_list_to_form_cluster[@]} ))
if [ "$num_of_new_nodes" -gt 0 ]; then
for i in $(seq $(( last_node_num + 1 )) $(( last_node_num + num_of_new_nodes )) ); do
hanodes_array+=("hanode$i")
done
elif [ $WITH_QNETD_NODE -eq 0 ];then
return
fi
else
hanodes_arry=($*)
hanodes_array=($*)
fi

if [ $WITH_QNETD_NODE -eq 1 ];then
create_node ${hanodes_arry[@]} "qnetd-node"
if [ $WITH_QNETD_NODE -eq 1 ] && ! is_container_existing "qnetd-node";then
create_node ${hanodes_array[@]} "qnetd-node"
else
create_node ${hanodes_arry[@]}
[ $WITH_QNETD_NODE -eq 1 ] && warning Skip creating the existing qnetd-node
[ "$num_of_new_nodes" -eq 0 ] && return
create_node ${hanodes_array[@]}
fi

[ "$CONFIG_COROSYNC_FLAG" -eq 0 ] && return
config_cluster ${hanodes_arry[@]}
start_cluster ${hanodes_arry[@]}
config_cluster
start_cluster
docker_exec "hanode1" "crm configure property stonith-enabled=false"
}

Expand Down Expand Up @@ -410,7 +460,7 @@ run_origin_regression_test() {

prepare_coverage_env() {
for node in $*; do
docker exec -t $node /bin/sh -c 'sed -i '\''1a\import coverage\nimport atexit\ncov=coverage.Coverage(config_file="/opt/crmsh/test/features/coveragerc")\natexit.register(lambda:(cov.stop(),cov.save()))\ncov.start()'\'' /usr/sbin/crm'
docker exec -t $node /bin/sh -c 'sed -i '\''1a\import coverage\nimport atexit\ncov=coverage.Coverage(config_file="/opt/crmsh/test/features/coveragerc")\natexit.register(lambda:(cov.stop(),cov.save()))\ncov.start()'\'' /usr/sbin/crm'
done
}

Expand Down Expand Up @@ -517,13 +567,13 @@ for case_num in $*;do
setup_cluster ${node_arry[@]}
adjust_test_case ${node_arry[0]} $case_file_in_container
echo
prepare_coverage_env "${node_arry[@]}"
prepare_coverage_env "${node_arry[@]}"
if [ "$NORMAL_USER_FLAG" -eq 0 ];then
info "Running \"$case_file_in_container\" under 'root'..."
docker_exec ${node_arry[0]} "behave --no-logcapture $case_file_in_container || exit 1" || exit 1
else
info "Running \"$case_file_in_container\" under normal user 'alice'..."
docker_exec ${node_arry[0]} "su - alice -c 'sudo behave --no-logcapture $case_file_in_container || exit 1'" || exit 1
docker_exec ${node_arry[0]} "su - alice -c 'sudo behave --no-logcapture $case_file_in_container || exit 1'" || exit 1
fi
fetch_coverage_report "${node_arry[@]}"
echo
Expand Down

0 comments on commit 14bfe4a

Please sign in to comment.