mirror of https://github.com/ceph/ceph-csi.git
ci: use kubectl_retry in install_helm.sh script

Signed-off-by: Rakshith R <rar@redhat.com>
(cherry picked from commit eb8c1cd5ab)

# Conflicts:
#	scripts/install-helm.sh

commit c9eb7bce7c, parent 661602d731
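The diff below replaces plain kubectl invocations with kubectl_retry, a helper that the script now sources from scripts/utils.sh (see the second hunk). The helper's implementation is not part of this diff; a minimal sketch of the idea, with a retry count, delay, and message that are illustrative assumptions rather than the real utils.sh code, could look like:

# Illustrative sketch only -- the real helper lives in scripts/utils.sh and may differ.
# Retries a kubectl call a few times so transient API-server hiccups do not fail the CI run.
kubectl_retry() {
    local retries=0 max_retries=5 delay=10
    until kubectl "${@}"; do
        retries=$((retries + 1))
        if [ "${retries}" -ge "${max_retries}" ]; then
            echo "kubectl ${1} failed after ${max_retries} attempts" >&2
            return 1
        fi
        sleep "${delay}"
    done
}

Because such a wrapper passes kubectl's stdout through unchanged, call sites like total_replicas=$(kubectl_retry get deployment ...) capture exactly what they captured before; the only behavioural change is that transient failures are retried instead of surfacing immediately.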
--- a/scripts/install-helm.sh
+++ b/scripts/install-helm.sh
@@ -1,4 +1,4 @@
-#!/bin/bash -e
+#!/bin/bash -E
 
 #Based on ideas from https://github.com/rook/rook/blob/master/tests/scripts/helm.sh
 
@@ -7,6 +7,8 @@ TEMP="/tmp/cephcsi-helm-test"
 SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
 # shellcheck source=build.env
 [ ! -e "${SCRIPT_DIR}"/../build.env ] || source "${SCRIPT_DIR}"/../build.env
+# shellcheck disable=SC1091
+[ ! -e "${SCRIPT_DIR}"/utils.sh ] || source "${SCRIPT_DIR}"/utils.sh
 
 HELM="helm"
 HELM_VERSION=${HELM_VERSION:-"latest"}
@@ -47,12 +49,12 @@ check_deployment_status() {
     NAMESPACE=$2
     echo "Checking Deployment status for label $LABEL in Namespace $NAMESPACE"
     for ((retry = 0; retry <= DEPLOY_TIMEOUT; retry = retry + 5)); do
-        total_replicas=$(kubectl get deployment -l "$LABEL" -n "$NAMESPACE" -o jsonpath='{.items[0].status.replicas}')
+        total_replicas=$(kubectl_retry get deployment -l "$LABEL" -n "$NAMESPACE" -o jsonpath='{.items[0].status.replicas}')
 
-        ready_replicas=$(kubectl get deployment -l "$LABEL" -n "$NAMESPACE" -o jsonpath='{.items[0].status.readyReplicas}')
+        ready_replicas=$(kubectl_retry get deployment -l "$LABEL" -n "$NAMESPACE" -o jsonpath='{.items[0].status.readyReplicas}')
         if [ "$total_replicas" != "$ready_replicas" ]; then
             echo "Total replicas $total_replicas is not equal to ready count $ready_replicas"
-            kubectl get deployment -l "$LABEL" -n "$NAMESPACE"
+            kubectl_retry get deployment -l "$LABEL" -n "$NAMESPACE"
             sleep 10
         else
             echo "Total replicas $total_replicas is equal to ready count $ready_replicas"
@@ -71,12 +73,12 @@ check_daemonset_status() {
     NAMESPACE=$2
     echo "Checking Daemonset status for label $LABEL in Namespace $NAMESPACE"
     for ((retry = 0; retry <= DEPLOY_TIMEOUT; retry = retry + 5)); do
-        total_replicas=$(kubectl get daemonset -l "$LABEL" -n "$NAMESPACE" -o jsonpath='{.items[0].status.numberAvailable}')
+        total_replicas=$(kubectl_retry get daemonset -l "$LABEL" -n "$NAMESPACE" -o jsonpath='{.items[0].status.numberAvailable}')
 
-        ready_replicas=$(kubectl get daemonset -l "$LABEL" -n "$NAMESPACE" -o jsonpath='{.items[0].status.numberReady}')
+        ready_replicas=$(kubectl_retry get daemonset -l "$LABEL" -n "$NAMESPACE" -o jsonpath='{.items[0].status.numberReady}')
         if [ "$total_replicas" != "$ready_replicas" ]; then
             echo "Total replicas $total_replicas is not equal to ready count $ready_replicas"
-            kubectl get daemonset -l "$LABEL" -n "$NAMESPACE"
+            kubectl_retry get daemonset -l "$LABEL" -n "$NAMESPACE"
             sleep 10
         else
             echo "Total replicas $total_replicas is equal to ready count $ready_replicas"
@@ -132,9 +134,9 @@ install_cephcsi_helm_charts() {
     fi
 
     # label the nodes uniformly for domain information
-    for node in $(kubectl get node -o jsonpath='{.items[*].metadata.name}'); do
-        kubectl label node/"${node}" ${NODE_LABEL_REGION}=${REGION_VALUE}
-        kubectl label node/"${node}" ${NODE_LABEL_ZONE}=${ZONE_VALUE}
+    for node in $(kubectl_retry get node -o jsonpath='{.items[*].metadata.name}'); do
+        kubectl_retry label node/"${node}" ${NODE_LABEL_REGION}=${REGION_VALUE}
+        kubectl_retry label node/"${node}" ${NODE_LABEL_ZONE}=${ZONE_VALUE}
     done
 
     # install ceph-csi-cephfs and ceph-csi-rbd charts
@@ -145,9 +147,8 @@ install_cephcsi_helm_charts() {
 
     # deleting configmap as a workaround to avoid configmap already present
     # issue when installing ceph-csi-rbd
-    kubectl delete cm ceph-csi-config --namespace ${NAMESPACE}
+    kubectl_retry delete cm ceph-csi-config --namespace ${NAMESPACE}
     "${HELM}" install --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-rbdplugin-provisioner --set nodeplugin.fullnameOverride=csi-rbdplugin --set configMapName=ceph-csi-config --set provisioner.podSecurityPolicy.enabled=true --set nodeplugin.podSecurityPolicy.enabled=true --set provisioner.replicaCount=1 ${RBD_CHART_NAME} "${SCRIPT_DIR}"/../charts/ceph-csi-rbd --set topology.enabled=true --set topology.domainLabels="{${NODE_LABEL_REGION},${NODE_LABEL_ZONE}}" --set provisioner.maxSnapshotsOnImage=3 --set provisioner.minSnapshotsOnImage=2
 
     check_deployment_status app=ceph-csi-rbd ${NAMESPACE}
     check_daemonset_status app=ceph-csi-rbd ${NAMESPACE}
 
@@ -155,9 +156,9 @@ install_cephcsi_helm_charts() {
 
 cleanup_cephcsi_helm_charts() {
     # remove set labels
-    for node in $(kubectl get node --no-headers | cut -f 1 -d ' '); do
-        kubectl label node/"$node" test.failure-domain/region-
-        kubectl label node/"$node" test.failure-domain/zone-
+    for node in $(kubectl_retry get node --no-headers | cut -f 1 -d ' '); do
+        kubectl_retry label node/"$node" test.failure-domain/region-
+        kubectl_retry label node/"$node" test.failure-domain/zone-
     done
     # TODO/LATER we could remove the CSI labels that would have been set as well
     NAMESPACE=$1
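The shebang change in the first hunk (bash -e to bash -E) is not explained in the commit message. One plausible reading, offered here as an assumption rather than the author's stated intent, is that errexit (-e) would abort the whole script the moment a kubectl_retry call gives up and returns non-zero inside an assignment, whereas the status-check loops above prefer to keep polling and report the failure themselves; -E (errtrace) only makes ERR traps propagate into functions and does not abort on the first failure. A small illustration:

#!/bin/bash -E
# Illustration only: contrasts errexit (-e) with errtrace (-E).
# flaky_query stands in for a kubectl/kubectl_retry call that is still failing.
flaky_query() {
    return 1
}

# Under '#!/bin/bash -e' the next line would terminate the script, because a
# failing command substitution makes the assignment itself fail.
# Under '-E' (errtrace, no errexit) execution continues and the caller can
# sleep and retry, as the deployment/daemonset status loops in the diff do.
replicas=$(flaky_query)
echo "query failed this round (replicas='${replicas}'); retrying later"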