mirror of https://github.com/ceph/ceph-csi.git (synced 2024-11-09 16:00:22 +00:00)
ci: fix shell check failures

Signed-off-by: riya-singhal31 <rsinghal@redhat.com>
(cherry picked from commit 44612fe34c)

This commit is contained in:
parent 9c1ca71ed0
commit bd7ecc880e
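
For context: every shell change below addresses ShellCheck rule SC2086 ("Double quote to prevent globbing and word splitting"), which flags unquoted expansions such as ${NAMESPACE}. A minimal sketch of the failure mode, using an illustrative variable that does not appear in these scripts:

    #!/bin/bash
    ns="my ns"              # value containing a space
    printf '%s\n' ${ns}     # unquoted: word-splits into two arguments, "my" and "ns"
    printf '%s\n' "${ns}"   # quoted: stays one argument, "my ns"

Quoting keeps each expansion a single argument, so commands such as kubectl and helm receive the value intact even if it ever contains whitespace or glob characters.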
@@ -148,7 +148,7 @@ install_cephcsi_helm_charts() {
 NAMESPACE="default"
 fi
 
-kubectl_retry create namespace ${NAMESPACE}
+kubectl_retry create namespace "${NAMESPACE}"
 
 # label the nodes uniformly for domain information
 for node in $(kubectl_retry get node -o jsonpath='{.items[*].metadata.name}'); do
@@ -170,19 +170,19 @@ install_cephcsi_helm_charts() {
 # install ceph-csi-cephfs and ceph-csi-rbd charts
 # shellcheck disable=SC2086
 "${HELM}" install --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-cephfsplugin-provisioner --set nodeplugin.fullnameOverride=csi-cephfsplugin --set configMapName=ceph-csi-config --set provisioner.replicaCount=1 --set-json='commonLabels={"app.kubernetes.io/name": "ceph-csi-cephfs", "app.kubernetes.io/managed-by": "helm"}' ${SET_SC_TEMPLATE_VALUES} ${CEPHFS_SECRET_TEMPLATE_VALUES} ${CEPHFS_CHART_NAME} "${SCRIPT_DIR}"/../charts/ceph-csi-cephfs
-check_deployment_status app=ceph-csi-cephfs ${NAMESPACE}
-check_daemonset_status app=ceph-csi-cephfs ${NAMESPACE}
+check_deployment_status app=ceph-csi-cephfs "${NAMESPACE}"
+check_daemonset_status app=ceph-csi-cephfs "${NAMESPACE}"
 
 # deleting configmaps as a workaround to avoid configmap already present
 # issue when installing ceph-csi-rbd
-kubectl_retry delete cm ceph-csi-config --namespace ${NAMESPACE}
-kubectl_retry delete cm ceph-config --namespace ${NAMESPACE}
+kubectl_retry delete cm ceph-csi-config --namespace "${NAMESPACE}"
+kubectl_retry delete cm ceph-config --namespace "${NAMESPACE}"
 
 # shellcheck disable=SC2086
 "${HELM}" install --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-rbdplugin-provisioner --set nodeplugin.fullnameOverride=csi-rbdplugin --set configMapName=ceph-csi-config --set provisioner.replicaCount=1 --set-json='commonLabels={"app.kubernetes.io/name": "ceph-csi-rbd", "app.kubernetes.io/managed-by": "helm"}' ${SET_SC_TEMPLATE_VALUES} ${RBD_SECRET_TEMPLATE_VALUES} ${RBD_CHART_NAME} "${SCRIPT_DIR}"/../charts/ceph-csi-rbd --set topology.enabled=true --set topology.domainLabels="{${NODE_LABEL_REGION},${NODE_LABEL_ZONE}}" --set provisioner.maxSnapshotsOnImage=3 --set provisioner.minSnapshotsOnImage=2
 
-check_deployment_status app=ceph-csi-rbd ${NAMESPACE}
-check_daemonset_status app=ceph-csi-rbd ${NAMESPACE}
+check_deployment_status app=ceph-csi-rbd "${NAMESPACE}"
+check_daemonset_status app=ceph-csi-rbd "${NAMESPACE}"
 
 }
 
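Note that the "# shellcheck disable=SC2086" annotations on the two helm install lines are deliberately kept: variables such as ${SET_SC_TEMPLATE_VALUES} hold several whitespace-separated flags and must word-split into separate arguments, so quoting them would pass everything as one bogus argument. A sketch of the usual alternative, a bash array (the flag values here are made up for illustration, not taken from these scripts):

    extra_values=(--set storageClass.create=true --set storageClass.name=csi-cephfs-sc)   # hypothetical flags
    "${HELM}" install my-release ./my-chart "${extra_values[@]}"   # each element expands as its own intact argument

Arrays preserve word boundaries without tripping SC2086, but switching these scripts over would touch every caller that builds the *_TEMPLATE_VALUES strings, so the targeted disable is the smaller change.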
@@ -197,9 +197,9 @@ cleanup_cephcsi_helm_charts() {
 if [ -z "$NAMESPACE" ]; then
 NAMESPACE="default"
 fi
-"${HELM}" uninstall ${CEPHFS_CHART_NAME} --namespace ${NAMESPACE}
-"${HELM}" uninstall ${RBD_CHART_NAME} --namespace ${NAMESPACE}
-kubectl_retry delete namespace ${NAMESPACE}
+"${HELM}" uninstall ${CEPHFS_CHART_NAME} --namespace "${NAMESPACE}"
+"${HELM}" uninstall ${RBD_CHART_NAME} --namespace "${NAMESPACE}"
+kubectl_retry delete namespace "${NAMESPACE}"
 }
 
 helm_reset() {
@@ -27,21 +27,21 @@ function install_snapshot_controller() {
 namespace="kube-system"
 fi
 
-create_or_delete_resource "create" ${namespace}
+create_or_delete_resource "create" "${namespace}"
 
-pod_ready=$(kubectl get pods -l app=snapshot-controller -n ${namespace} -o jsonpath='{.items[0].status.containerStatuses[0].ready}')
+pod_ready=$(kubectl get pods -l app=snapshot-controller -n "${namespace}" -o jsonpath='{.items[0].status.containerStatuses[0].ready}')
 INC=0
 until [[ "${pod_ready}" == "true" || $INC -gt 20 ]]; do
 sleep 10
 ((++INC))
-pod_ready=$(kubectl get pods -l app=snapshot-controller -n ${namespace} -o jsonpath='{.items[0].status.containerStatuses[0].ready}')
+pod_ready=$(kubectl get pods -l app=snapshot-controller -n "${namespace}" -o jsonpath='{.items[0].status.containerStatuses[0].ready}')
 echo "snapshotter pod status: ${pod_ready}"
 done
 
 if [ "${pod_ready}" != "true" ]; then
 echo "snapshotter controller creation failed"
-kubectl get pods -l app=snapshot-controller -n ${namespace}
-kubectl describe po -l app=snapshot-controller -n ${namespace}
+kubectl get pods -l app=snapshot-controller -n "${namespace}"
+kubectl describe po -l app=snapshot-controller -n "${namespace}"
 exit 1
 fi
 
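The loop above polls the pod's containerStatuses up to 20 times at 10-second intervals, roughly 200 seconds in total. As a point of comparison only (the script does not use it), kubectl ships a built-in wait that covers the happy path in one line:

    kubectl wait pod -l app=snapshot-controller -n "${namespace}" --for=condition=Ready --timeout=200s

The hand-rolled loop still earns its keep here: it logs the status on every iteration, and it keeps retrying even while no matching pod exists yet, whereas kubectl wait fails immediately when the label selector matches nothing.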
@@ -53,7 +53,7 @@ function cleanup_snapshot_controller() {
 if [ -z "${namespace}" ]; then
 namespace="kube-system"
 fi
-create_or_delete_resource "delete" ${namespace}
+create_or_delete_resource "delete" "${namespace}"
 }
 
 function create_or_delete_resource() {
@@ -363,7 +363,7 @@ def get_tool_box_pod_name(arg):
 print("failed to pod %s", err)
 return ""
 
-#pylint: disable=too-many-branches
+#pylint: disable=too-many-branches, E0012, W0719
 def get_pool_name(arg, vol_id, is_rbd):
 """
 get pool name from ceph backend
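On the last hunk: W0719 is pylint's broad-exception-raised check from newer pylint releases, and E0012 (bad-option-value) is the error older pylint emits when a disable comment names a message it does not recognize. Disabling both on one line most likely keeps the pragma valid across pylint versions; that reading is inferred from the message codes, since the commit message does not spell it out.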