Rook deploy script, Adding rbd_pool check and cephfilesystem check.
commit 327fcd1b1b (parent 6a8ddad669)
@@ -102,11 +102,13 @@ ssh)
     ;;
 deploy-rook)
     echo "deploy rook"
-    ./scripts/rook.sh deploy
+    DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+    "$DIR"/rook.sh deploy
     ;;
 teardown-rook)
     echo "teardown rook"
-    ./scripts/rook.sh teardown
+    DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+    "$DIR"/rook.sh teardown
 
     # delete rook data for minikube
     minikube ssh "sudo rm -rf /mnt/${DISK}/var/lib/rook; sudo rm -rf /var/lib/rook"
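The hunk above swaps the hard-coded ./scripts/rook.sh path for one derived from
the calling script's own location, so deploy-rook and teardown-rook work from
any working directory. A minimal sketch of the pattern (the minikube.sh file
name below is an assumption; the hunk only shows the surrounding case
statement):

    # BASH_SOURCE[0] is the path of the script file itself, not of $PWD,
    # so the sibling rook.sh is found even when invoked from elsewhere:
    DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
    "$DIR"/rook.sh deploy

    # before the change this only worked from the repository root:
    #   cd /tmp && /path/to/repo/scripts/minikube.sh deploy-rook   # assumed path
    #   -> ./scripts/rook.sh: No such file or directory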
@@ -12,14 +12,20 @@ function deploy_rook() {
     kubectl create -f "${ROOK_URL}/filesystem-test.yaml"
     kubectl create -f "${ROOK_URL}/pool-test.yaml"
 
-    for ((retry=0; retry<=ROOK_DEPLOY_TIMEOUT; retry=retry+5)); do
-        echo "Wait for rook deploy... ${retry}s"
-        sleep 5
-        if kubectl get cephclusters -n rook-ceph | grep HEALTH_OK &> /dev/null; then
-            break
-        fi
-    done
+    # Check if CephCluster is empty
+    if ! kubectl -n rook-ceph get cephclusters -oyaml | grep 'items: \[\]' &> /dev/null; then
+        check_ceph_cluster_health
+    fi
+
+    # Check if CephFileSystem is empty
+    if ! kubectl -n rook-ceph get cephfilesystems -oyaml | grep 'items: \[\]' &> /dev/null; then
+        check_mds_stat
+    fi
 
+    # Check if CephBlockPool is empty
+    if ! kubectl -n rook-ceph get cephblockpools -oyaml | grep 'items: \[\]' &> /dev/null; then
+        check_rbd_stat
+    fi
 }
 
 function teardown_rook() {
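Instead of grepping the human-readable status table for HEALTH_OK, deploy_rook
now first checks whether any objects of each kind exist at all. When a resource
type has no instances, kubectl get <kind> -oyaml prints a v1 List whose items
field is the literal empty array, which is what the grep matches. A sketch of
the idea, using the CephBlockPool case from the hunk:

    # with no CephBlockPool objects in the namespace, kubectl prints roughly:
    #   apiVersion: v1
    #   items: []
    #   kind: List
    # so a successful grep for 'items: \[\]' means "nothing to check yet"
    if ! kubectl -n rook-ceph get cephblockpools -oyaml | grep 'items: \[\]' &> /dev/null; then
        check_rbd_stat   # at least one pool exists; poll until it is usable
    fi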
@@ -31,6 +37,62 @@ function teardown_rook() {
     kubectl delete -f "${ROOK_URL}/common.yaml"
 }
 
+function check_ceph_cluster_health(){
+    for ((retry=0; retry<=ROOK_DEPLOY_TIMEOUT; retry=retry+5)); do
+        echo "Wait for rook deploy... ${retry}s" && sleep 5
+
+        CEPH_STATE=$(kubectl -n rook-ceph get cephclusters -o jsonpath='{.items[0].status.state}')
+        CEPH_HEALTH=$(kubectl -n rook-ceph get cephclusters -o jsonpath='{.items[0].status.ceph.health}')
+        echo "Checking CEPH cluster state: [$CEPH_STATE]"
+        if [ "$CEPH_STATE" = "Created" ]; then
+            if [ "$CEPH_HEALTH" = "HEALTH_OK" ]; then
+                echo "Creating CEPH cluster is done. [$CEPH_HEALTH]"
+                break;
+            fi
+        fi
+    done
+    echo ""
+}
+
+function check_mds_stat(){
+    for ((retry=0; retry<=ROOK_DEPLOY_TIMEOUT; retry=retry+5)); do
+        FS_NAME=$(kubectl -n rook-ceph get cephfilesystems.ceph.rook.io -ojsonpath='{.items[0].metadata.name}')
+        echo "Checking MDS ($FS_NAME) stats... ${retry}s" && sleep 5
+
+        ACTIVE_COUNT=$(kubectl -n rook-ceph get cephfilesystems myfs -ojsonpath='{.spec.metadataServer.activeCount}')
+
+        ACTIVE_COUNT_NUM=$((ACTIVE_COUNT + 0))
+        echo "MDS ($FS_NAME) active_count: [$ACTIVE_COUNT_NUM]"
+        if (( ACTIVE_COUNT_NUM < 1 )); then
+            continue;
+        else
+            if kubectl -n rook-ceph get pod -l rook_file_system=myfs | grep Running &> /dev/null; then
+                echo "Filesystem ($FS_NAME) is successfully created..."
+                break;
+            fi
+        fi
+    done
+    echo ""
+}
+
+function check_rbd_stat(){
+    for ((retry=0; retry<=ROOK_DEPLOY_TIMEOUT; retry=retry+5)); do
+        RBD_POOL_NAME=$(kubectl -n rook-ceph get cephblockpools -ojsonpath='{.items[0].metadata.name}')
+        echo "Checking RBD ($RBD_POOL_NAME) stats... ${retry}s" && sleep 5
+
+        TOOLBOX_POD=$(kubectl -n rook-ceph get pods -l app=rook-ceph-tools -o jsonpath='{.items[0].metadata.name}')
+        TOOLBOX_POD_STATUS=$(kubectl -n rook-ceph get pod "$TOOLBOX_POD" -ojsonpath='{.status.phase}')
+        echo "Toolbox POD ($TOOLBOX_POD) status: [$TOOLBOX_POD_STATUS]"
+        [[ "$TOOLBOX_POD_STATUS" != "Running" ]] && continue
+
+        if kubectl exec -n rook-ceph "$TOOLBOX_POD" -it -- rbd pool stats "$RBD_POOL_NAME" &> /dev/null; then
+            echo "RBD ($RBD_POOL_NAME) is successfully created..."
+            break
+        fi
+    done
+    echo ""
+}
+
 case "${1:-}" in
 deploy)
     deploy_rook
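Each new helper polls inside the same ROOK_DEPLOY_TIMEOUT loop:
check_ceph_cluster_health waits for the CephCluster to reach state Created with
ceph health HEALTH_OK, check_mds_stat waits for at least one active MDS and a
Running filesystem pod, and check_rbd_stat waits for the rook-ceph-tools pod
and confirms the pool answers rbd pool stats. Note that check_mds_stat reads
FS_NAME dynamically but still hard-codes myfs in the activeCount query and the
label selector; myfs is the filesystem name that Rook's filesystem-test.yaml
creates, so the two agree for this test deployment. A usage sketch of the
updated entry points:

    # deploy Rook and block until the cluster, filesystem and pool checks pass
    ./scripts/rook.sh deploy

    # the same status fields the checks read, for manual inspection:
    kubectl -n rook-ceph get cephclusters -o jsonpath='{.items[0].status.state}'
    kubectl -n rook-ceph get cephclusters -o jsonpath='{.items[0].status.ceph.health}'

    # tear everything down again
    ./scripts/rook.sh teardown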