diff --git a/scripts/rook.sh b/scripts/rook.sh
index 4d732c270..1b650eaa5 100755
--- a/scripts/rook.sh
+++ b/scripts/rook.sh
@@ -86,6 +86,9 @@ function deploy_rook() {
         check_ceph_cluster_health
     fi
 
+    # Make sure Ceph Mgr is running
+    check_ceph_mgr
+
     # Check if CephFileSystem is empty
     if ! kubectl_retry -n rook-ceph get cephfilesystems -oyaml | grep 'items: \[\]' &>/dev/null; then
         check_mds_stat
@@ -166,6 +169,22 @@ function check_ceph_cluster_health() {
     echo ""
 }
 
+function check_ceph_mgr() {
+    for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
+        echo "Waiting for Ceph Mgr... ${retry}s" && sleep 5
+
+        MGR_POD=$(kubectl_retry -n rook-ceph get pods -l app=rook-ceph-mgr -o jsonpath='{.items[0].metadata.name}')
+        MGR_POD_STATUS=$(kubectl_retry -n rook-ceph get pod "$MGR_POD" -ojsonpath='{.status.phase}')
+        [[ "$MGR_POD_STATUS" = "Running" ]] && break
+    done
+
+    if [ "$retry" -gt "$ROOK_DEPLOY_TIMEOUT" ]; then
+        echo "[Timeout] Ceph Mgr is not running (timeout)"
+        return 1
+    fi
+    echo ""
+}
+
 function check_mds_stat() {
     for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
         FS_NAME=$(kubectl_retry -n rook-ceph get cephfilesystems.ceph.rook.io -ojsonpath='{.items[0].metadata.name}')