ceph-csi/scripts/rook.sh
#!/bin/bash -e
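
# The Rook release to deploy, the deploy timeout (roughly in seconds) and the
# name of the extra RBD pool created by create_block_pool can all be
# overridden through the environment before invoking this script.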
ROOK_VERSION=${ROOK_VERSION:-"v1.1.7"}
ROOK_DEPLOY_TIMEOUT=${ROOK_DEPLOY_TIMEOUT:-300}
ROOK_URL="https://raw.githubusercontent.com/rook/rook/${ROOK_VERSION}/cluster/examples/kubernetes/ceph"
ROOK_BLOCK_POOL_NAME=${ROOK_BLOCK_POOL_NAME:-"newrbdpool"}
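
# Deploy the Rook common resources, operator, test CephCluster, toolbox,
# CephFS filesystem and RBD pool, then wait for each created resource to
# report as healthy/ready.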
function deploy_rook() {
    kubectl create -f "${ROOK_URL}/common.yaml"
    kubectl create -f "${ROOK_URL}/operator.yaml"
    kubectl create -f "${ROOK_URL}/cluster-test.yaml"
    kubectl create -f "${ROOK_URL}/toolbox.yaml"
    kubectl create -f "${ROOK_URL}/filesystem-test.yaml"
    kubectl create -f "${ROOK_URL}/pool-test.yaml"

    # Check if CephCluster is empty
    if ! kubectl -n rook-ceph get cephclusters -oyaml | grep 'items: \[\]' &>/dev/null; then
        check_ceph_cluster_health
    fi

    # Check if CephFileSystem is empty
    if ! kubectl -n rook-ceph get cephfilesystems -oyaml | grep 'items: \[\]' &>/dev/null; then
        check_mds_stat
    fi

    # Check if CephBlockPool is empty
    if ! kubectl -n rook-ceph get cephblockpools -oyaml | grep 'items: \[\]' &>/dev/null; then
        check_rbd_stat
    fi
}
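
# Delete the Rook resources in the reverse order of their creation.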
function teardown_rook() {
    kubectl delete -f "${ROOK_URL}/pool-test.yaml"
    kubectl delete -f "${ROOK_URL}/filesystem-test.yaml"
    kubectl delete -f "${ROOK_URL}/toolbox.yaml"
    kubectl delete -f "${ROOK_URL}/cluster-test.yaml"
    kubectl delete -f "${ROOK_URL}/operator.yaml"
    kubectl delete -f "${ROOK_URL}/common.yaml"
}
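
# Create an additional CephBlockPool (named via ROOK_BLOCK_POOL_NAME) from the
# upstream pool-test.yaml template and wait until its stats can be queried
# through the toolbox pod.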
function create_block_pool() {
    curl -o newpool.yaml "${ROOK_URL}/pool-test.yaml"
    sed -i "s/replicapool/$ROOK_BLOCK_POOL_NAME/g" newpool.yaml
    kubectl create -f "./newpool.yaml"
    rm -f "./newpool.yaml"

    for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
        echo "Checking RBD ($ROOK_BLOCK_POOL_NAME) stats... ${retry}s" && sleep 5
        TOOLBOX_POD=$(kubectl -n rook-ceph get pods -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}')
        TOOLBOX_POD_STATUS=$(kubectl -n rook-ceph get pod "$TOOLBOX_POD" -ojsonpath='{.status.phase}')
        [[ "$TOOLBOX_POD_STATUS" != "Running" ]] && \
            { echo "Toolbox POD ($TOOLBOX_POD) status: [$TOOLBOX_POD_STATUS]"; continue; }
        if kubectl exec -n rook-ceph "$TOOLBOX_POD" -it -- rbd pool stats "$ROOK_BLOCK_POOL_NAME" &>/dev/null; then
            echo "RBD pool ($ROOK_BLOCK_POOL_NAME) created successfully..."
            break
        fi
    done
    if [ "$retry" -gt "$ROOK_DEPLOY_TIMEOUT" ]; then
        echo "[Timeout] Failed to get RBD pool $ROOK_BLOCK_POOL_NAME stats"
        exit 1
    fi
    echo ""
}
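
# Delete the additional CephBlockPool created by create_block_pool.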
function delete_block_pool() {
    curl -o newpool.yaml "${ROOK_URL}/pool-test.yaml"
    sed -i "s/replicapool/$ROOK_BLOCK_POOL_NAME/g" newpool.yaml
    kubectl delete -f "./newpool.yaml"
    rm -f "./newpool.yaml"
}
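
# Poll the CephCluster resource until it reports state "Created" and health
# "HEALTH_OK", or give up after roughly ROOK_DEPLOY_TIMEOUT seconds.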
function check_ceph_cluster_health() {
    for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
        echo "Waiting for rook to deploy... ${retry}s" && sleep 5
        CEPH_STATE=$(kubectl -n rook-ceph get cephclusters -o jsonpath='{.items[0].status.state}')
        CEPH_HEALTH=$(kubectl -n rook-ceph get cephclusters -o jsonpath='{.items[0].status.ceph.health}')
        echo "Checking CEPH cluster state: [$CEPH_STATE]"
        if [ "$CEPH_STATE" = "Created" ]; then
            if [ "$CEPH_HEALTH" = "HEALTH_OK" ]; then
                echo "CEPH cluster created and healthy. [$CEPH_HEALTH]"
                break
            fi
        fi
    done
    if [ "$retry" -gt "$ROOK_DEPLOY_TIMEOUT" ]; then
        echo "[Timeout] CEPH cluster did not reach a healthy state"
        exit 1
    fi
    echo ""
}
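
# Poll the CephFilesystem resource until at least one active MDS is configured
# and its pods are Running, or give up after roughly ROOK_DEPLOY_TIMEOUT seconds.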
function check_mds_stat() {
    for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
        FS_NAME=$(kubectl -n rook-ceph get cephfilesystems.ceph.rook.io -ojsonpath='{.items[0].metadata.name}')
        echo "Checking MDS ($FS_NAME) stats... ${retry}s" && sleep 5
        ACTIVE_COUNT=$(kubectl -n rook-ceph get cephfilesystems "$FS_NAME" -ojsonpath='{.spec.metadataServer.activeCount}')
        ACTIVE_COUNT_NUM=$((ACTIVE_COUNT + 0))
        echo "MDS ($FS_NAME) active_count: [$ACTIVE_COUNT_NUM]"
        if ((ACTIVE_COUNT_NUM < 1)); then
            continue
        fi
        if kubectl -n rook-ceph get pod -l rook_file_system="$FS_NAME" | grep Running &>/dev/null; then
            echo "Filesystem ($FS_NAME) created successfully..."
            break
        fi
    done
    if [ "$retry" -gt "$ROOK_DEPLOY_TIMEOUT" ]; then
        echo "[Timeout] Failed to get ceph filesystem pods"
        exit 1
    fi
    echo ""
}
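
# Poll the first CephBlockPool until 'rbd pool stats' succeeds from the
# toolbox pod, or give up after roughly ROOK_DEPLOY_TIMEOUT seconds.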
function check_rbd_stat() {
    for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
        RBD_POOL_NAME=$(kubectl -n rook-ceph get cephblockpools -ojsonpath='{.items[0].metadata.name}')
        echo "Checking RBD ($RBD_POOL_NAME) stats... ${retry}s" && sleep 5
        TOOLBOX_POD=$(kubectl -n rook-ceph get pods -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}')
        TOOLBOX_POD_STATUS=$(kubectl -n rook-ceph get pod "$TOOLBOX_POD" -ojsonpath='{.status.phase}')
        [[ "$TOOLBOX_POD_STATUS" != "Running" ]] && \
            { echo "Toolbox POD ($TOOLBOX_POD) status: [$TOOLBOX_POD_STATUS]"; continue; }
        if kubectl exec -n rook-ceph "$TOOLBOX_POD" -it -- rbd pool stats "$RBD_POOL_NAME" &>/dev/null; then
            echo "RBD pool ($RBD_POOL_NAME) created successfully..."
            break
        fi
    done
    if [ "$retry" -gt "$ROOK_DEPLOY_TIMEOUT" ]; then
        echo "[Timeout] Failed to get RBD pool stats"
        exit 1
    fi
    echo ""
}
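
# Dispatch on the first command-line argument.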
case "${1:-}" in
deploy)
deploy_rook
;;
teardown)
teardown_rook
;;
create-block-pool)
create_block_pool
;;
delete-block-pool)
delete_block_pool
;;
*)
echo " $0 [command]
Available Commands:
deploy Deploy a rook
teardown Teardown a rook
create-block-pool Create a rook block pool
delete-block-pool Delete a rook block pool
" >&2
;;
esac
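
# Example invocations (illustrative; the pool name below is arbitrary):
#   ./scripts/rook.sh deploy
#   ROOK_BLOCK_POOL_NAME=testpool ./scripts/rook.sh create-block-pool
#   ./scripts/rook.sh teardown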