Mirror of https://github.com/ceph/ceph-csi.git (synced 2024-11-09 16:00:22 +00:00)
Fix alignment issue in shell script
Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
commit 520ceb6dcb (parent 50a84fb930)
@@ -61,8 +61,9 @@ build_push_images() {

     # build and push per arch images
     for ARCH in amd64 arm64; do
-        ifs=$IFS; IFS=
-        digest=$(awk -v ARCH=${ARCH} '{if (archfound) {print $NF; exit 0}}; {archfound=($0 ~ "arch.*"ARCH)}' <<< "${manifests}")
+        ifs=$IFS
+        IFS=
+        digest=$(awk -v ARCH=${ARCH} '{if (archfound) {print $NF; exit 0}}; {archfound=($0 ~ "arch.*"ARCH)}' <<<"${manifests}")
         IFS=$ifs
         sed -i "s|\(^FROM.*\)${baseimg}.*$|\1${baseimg}@${digest}|" "${dockerfile}"
         GOARCH=${ARCH} make push-image-cephcsi
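The removed and added lines above only split "ifs=$IFS; IFS=" onto two lines and drop the space after the here-string operator; the digest lookup itself is unchanged. For readers unfamiliar with that awk one-liner, here is a self-contained sketch of what it does, fed with made-up manifest text (the real script populates ${manifests} outside this hunk):

    # made-up manifest listing: each architecture line is followed by its digest line
    manifests=$(printf 'architecture: %s\ndigest: sha256:%s\n' amd64 1111aaaa arm64 2222bbbb)

    for ARCH in amd64 arm64; do
        # print the last field of the line that follows the matching architecture entry
        digest=$(awk -v ARCH=${ARCH} '{if (archfound) {print $NF; exit 0}}; {archfound=($0 ~ "arch.*"ARCH)}' <<<"${manifests}")
        echo "${ARCH} -> ${digest}"
    done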
@@ -102,13 +102,13 @@ ssh)
     ;;
 deploy-rook)
     echo "deploy rook"
-    DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-    "$DIR"/rook.sh deploy
+    DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
+    "$DIR"/rook.sh deploy
     ;;
 teardown-rook)
     echo "teardown rook"
-    DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-    "$DIR"/rook.sh teardown
+    DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
+    "$DIR"/rook.sh teardown

     # delete rook data for minikube
     minikube ssh "sudo rm -rf /mnt/${DISK}/var/lib/rook; sudo rm -rf /var/lib/rook"
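The only change in both branches is whitespace inside the command substitutions; behaviour is identical. The idiom resolves the directory the current script lives in so a sibling script can be called by absolute path. A minimal standalone sketch (the echo line is illustrative, not from the repo):

    #!/bin/bash
    # directory containing this script, regardless of the caller's working directory
    DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
    echo "helper scripts are expected in: ${DIR}"

    # invoke a sibling script by absolute path, as the deploy-rook branch does with rook.sh
    "$DIR"/rook.sh deploy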
@@ -5,40 +5,40 @@ ROOK_DEPLOY_TIMEOUT=${ROOK_DEPLOY_TIMEOUT:-300}
 ROOK_URL="https://raw.githubusercontent.com/rook/rook/${ROOK_VERSION}/cluster/examples/kubernetes/ceph"

 function deploy_rook() {
-    kubectl create -f "${ROOK_URL}/common.yaml"
-    kubectl create -f "${ROOK_URL}/operator.yaml"
-    kubectl create -f "${ROOK_URL}/cluster-test.yaml"
-    kubectl create -f "${ROOK_URL}/toolbox.yaml"
-    kubectl create -f "${ROOK_URL}/filesystem-test.yaml"
-    kubectl create -f "${ROOK_URL}/pool-test.yaml"
+    kubectl create -f "${ROOK_URL}/common.yaml"
+    kubectl create -f "${ROOK_URL}/operator.yaml"
+    kubectl create -f "${ROOK_URL}/cluster-test.yaml"
+    kubectl create -f "${ROOK_URL}/toolbox.yaml"
+    kubectl create -f "${ROOK_URL}/filesystem-test.yaml"
+    kubectl create -f "${ROOK_URL}/pool-test.yaml"

     # Check if CephCluster is empty
-    if ! kubectl -n rook-ceph get cephclusters -oyaml | grep 'items: \[\]' &> /dev/null; then
+    if ! kubectl -n rook-ceph get cephclusters -oyaml | grep 'items: \[\]' &>/dev/null; then
         check_ceph_cluster_health
     fi

     # Check if CephFileSystem is empty
-    if ! kubectl -n rook-ceph get cephfilesystems -oyaml | grep 'items: \[\]' &> /dev/null; then
+    if ! kubectl -n rook-ceph get cephfilesystems -oyaml | grep 'items: \[\]' &>/dev/null; then
         check_mds_stat
     fi

     # Check if CephBlockPool is empty
-    if ! kubectl -n rook-ceph get cephblockpools -oyaml | grep 'items: \[\]' &> /dev/null; then
+    if ! kubectl -n rook-ceph get cephblockpools -oyaml | grep 'items: \[\]' &>/dev/null; then
         check_rbd_stat
     fi
 }

 function teardown_rook() {
-    kubectl delete -f "${ROOK_URL}/pool-test.yaml"
-    kubectl delete -f "${ROOK_URL}/filesystem-test.yaml"
-    kubectl delete -f "${ROOK_URL}/toolbox.yaml"
-    kubectl delete -f "${ROOK_URL}/cluster-test.yaml"
-    kubectl delete -f "${ROOK_URL}/operator.yaml"
-    kubectl delete -f "${ROOK_URL}/common.yaml"
+    kubectl delete -f "${ROOK_URL}/pool-test.yaml"
+    kubectl delete -f "${ROOK_URL}/filesystem-test.yaml"
+    kubectl delete -f "${ROOK_URL}/toolbox.yaml"
+    kubectl delete -f "${ROOK_URL}/cluster-test.yaml"
+    kubectl delete -f "${ROOK_URL}/operator.yaml"
+    kubectl delete -f "${ROOK_URL}/common.yaml"
 }

-function check_ceph_cluster_health(){
-    for ((retry=0; retry<=ROOK_DEPLOY_TIMEOUT; retry=retry+5)); do
+function check_ceph_cluster_health() {
+    for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
         echo "Wait for rook deploy... ${retry}s" && sleep 5

         CEPH_STATE=$(kubectl -n rook-ceph get cephclusters -o jsonpath='{.items[0].status.state}')
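In deploy_rook, each health check only runs when the corresponding resource list is non-empty; "kubectl get ... -oyaml" prints "items: []" when nothing exists, which is what the grep detects. The same pattern as a small helper, for illustration (the helper name is made up):

    # succeeds (exit 0) when at least one object of the given kind exists in the namespace
    resource_exists() {
        local kind=$1 namespace=$2
        ! kubectl -n "${namespace}" get "${kind}" -oyaml | grep 'items: \[\]' &>/dev/null
    }

    if resource_exists cephclusters rook-ceph; then
        check_ceph_cluster_health
    fi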
@@ -47,15 +47,15 @@ function check_ceph_cluster_health(){
         if [ "$CEPH_STATE" = "Created" ]; then
             if [ "$CEPH_HEALTH" = "HEALTH_OK" ]; then
                 echo "Creating CEPH cluster is done. [$CEPH_HEALTH]"
-                break;
+                break
             fi
         fi
     done
     echo ""
 }

-function check_mds_stat(){
-    for ((retry=0; retry<=ROOK_DEPLOY_TIMEOUT; retry=retry+5)); do
+function check_mds_stat() {
+    for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
         FS_NAME=$(kubectl -n rook-ceph get cephfilesystems.ceph.rook.io -ojsonpath='{.items[0].metadata.name}')
         echo "Checking MDS ($FS_NAME) stats... ${retry}s" && sleep 5

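check_ceph_cluster_health is a poll-until-timeout loop: it re-reads the CephCluster state every 5 seconds until the cluster reports Created with HEALTH_OK, or ROOK_DEPLOY_TIMEOUT seconds have passed. The reformatted C-style for header is the generic shape of that pattern; a stripped-down sketch with a placeholder readiness test:

    TIMEOUT=${TIMEOUT:-300}

    for ((retry = 0; retry <= TIMEOUT; retry = retry + 5)); do
        echo "waiting... ${retry}s" && sleep 5
        # placeholder readiness test; the real script inspects kubectl jsonpath output instead
        if [ -e /tmp/ready ]; then
            echo "ready after ${retry}s"
            break
        fi
    done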
@@ -63,20 +63,20 @@ function check_mds_stat(){

         ACTIVE_COUNT_NUM=$((ACTIVE_COUNT + 0))
         echo "MDS ($FS_NAME) active_count: [$ACTIVE_COUNT_NUM]"
-        if (( ACTIVE_COUNT_NUM < 1 )); then
-            continue;
+        if ((ACTIVE_COUNT_NUM < 1)); then
+            continue
         else
-            if kubectl -n rook-ceph get pod -l rook_file_system=myfs | grep Running &> /dev/null; then
+            if kubectl -n rook-ceph get pod -l rook_file_system=myfs | grep Running &>/dev/null; then
                 echo "Filesystem ($FS_NAME) is successfully created..."
-                break;
+                break
             fi
         fi
     done
     echo ""
 }

-function check_rbd_stat(){
-    for ((retry=0; retry<=ROOK_DEPLOY_TIMEOUT; retry=retry+5)); do
+function check_rbd_stat() {
+    for ((retry = 0; retry <= ROOK_DEPLOY_TIMEOUT; retry = retry + 5)); do
         RBD_POOL_NAME=$(kubectl -n rook-ceph get cephblockpools -ojsonpath='{.items[0].metadata.name}')
         echo "Checking RBD ($RBD_POOL_NAME) stats... ${retry}s" && sleep 5

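Besides the MDS active count, check_mds_stat also accepts the filesystem once a pod carrying its label reports Running. A sketch of that secondary check on its own (the wrapper function is made up; the label and filesystem name are the ones used in the script above):

    # succeeds if any pod labelled for the myfs test filesystem is Running
    mds_pod_running() {
        kubectl -n rook-ceph get pod -l rook_file_system=myfs | grep Running &>/dev/null
    }

    if mds_pod_running; then
        echo "Filesystem pods are up"
    fi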
@@ -85,7 +85,7 @@ function check_rbd_stat(){
         echo "Toolbox POD ($TOOLBOX_POD) status: [$TOOLBOX_POD_STATUS]"
         [[ "$TOOLBOX_POD_STATUS" != "Running" ]] && continue

-        if kubectl exec -n rook-ceph "$TOOLBOX_POD" -it -- rbd pool stats "$RBD_POOL_NAME" &> /dev/null; then
+        if kubectl exec -n rook-ceph "$TOOLBOX_POD" -it -- rbd pool stats "$RBD_POOL_NAME" &>/dev/null; then
             echo "RBD ($RBD_POOL_NAME) is successfully created..."
             break
         fi
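The rbd pool stats probe runs inside the Rook toolbox pod, where the Ceph CLI is available; the changed line only tightens the redirection. The same probe can be run by hand once the toolbox pod name is known, for example (the label selector here is an assumption about the toolbox manifest, not something shown in this diff):

    # resolve the toolbox pod, then query the pool from inside it
    TOOLBOX_POD=$(kubectl -n rook-ceph get pods -l app=rook-ceph-tools -ojsonpath='{.items[0].metadata.name}')
    # RBD_POOL_NAME as resolved earlier in check_rbd_stat
    kubectl exec -n rook-ceph "$TOOLBOX_POD" -it -- rbd pool stats "$RBD_POOL_NAME"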
@@ -95,16 +95,16 @@ function check_rbd_stat(){

 case "${1:-}" in
 deploy)
-    deploy_rook
-    ;;
+    deploy_rook
+    ;;
 teardown)
-    teardown_rook
-    ;;
+    teardown_rook
+    ;;
 *)
-    echo " $0 [command]
+    echo " $0 [command]
 Available Commands:
   deploy             Deploy a rook
   teardown           Teardown a rook
 " >&2
-    ;;
+    ;;
 esac
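The closing case block makes the script a small deploy/teardown dispatcher; any other argument prints the usage text to stderr. Typical invocations look like this (the path is illustrative; the deploy-rook branch earlier in this commit calls it via its resolved $DIR):

    # bring up the test Rook/Ceph cluster
    ./rook.sh deploy

    # remove it again
    ./rook.sh teardown

    # anything else prints the usage text to stderr
    ./rook.sh help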