helm: add helm charts E2E

This PR adds support for installing helm, deploying and tearing down
the cephcsi helm charts, and running the E2E suite against the helm
chart deployment.

Add socat to provide port-forwarding access for helm.
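
For illustration, a rough sketch of the flow the new scripts drive; the
authoritative flags and ordering are in scripts/travis-helmtest.sh and
scripts/install-helm.sh below, and <namespace> is a placeholder:

```console
$ scripts/install-helm.sh up                          # fetch helm if missing, set up tiller RBAC, helm init
$ scripts/install-helm.sh install-cephcsi <namespace> # install the ceph-csi-cephfs and ceph-csi-rbd charts
$ go test github.com/ceph/ceph-csi/e2e -mod=vendor --cephcsi-namespace=<namespace> \
      --deploy-cephfs=false --deploy-rbd=false -v     # run E2E against the chart-deployed drivers
$ scripts/install-helm.sh cleanup-cephcsi             # delete both chart releases
$ scripts/install-helm.sh clean                       # helm reset and remove tiller
```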

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Madhu Rajanna, 2020-04-01 12:50:43 +05:30, committed by mergify[bot]
parent b2dfcae802, commit d09ffbd6de
10 changed files with 240 additions and 6 deletions


@@ -8,6 +8,7 @@ addons:
 packages:
 - realpath
 - ruby
+- socat
 services:
 - docker
@@ -26,6 +27,7 @@ env:
 - TEST_COVERAGE=stdout
 - GO_METALINTER_THREADS=1
 - GO_COVER_DIR=_output
+- HELM_VERSION=v2.16.5
 - VM_DRIVER=none
 - MINIKUBE_VERSION=v1.6.0
 - CHANGE_MINIKUBE_NONE_USER=true
@@ -102,6 +104,11 @@ jobs:
 # - Travis Arm64 CI job runs inside unprivileged LXD which blocks
 # launching minikube test environment
 - travis_terminate 0 # deploy only on x86
+- name: cephcsi helm charts with kube 1.17.0
+  script:
+    - scripts/skip-doc-change.sh || travis_terminate 0;
+    - make image-cephcsi || travis_terminate 1;
+    - scripts/travis-helmtest.sh v1.17.0 || travis_terminate 1;
 deploy:
 - provider: script


@@ -58,7 +58,7 @@ static-check:
 	./scripts/gosec.sh
 func-test:
-	go test github.com/ceph/ceph-csi/e2e $(TESTOPTIONS)
+	go test -mod=vendor github.com/ceph/ceph-csi/e2e $(TESTOPTIONS)
 .PHONY: cephcsi
 cephcsi:


@@ -92,7 +92,7 @@ cluster or you can pass `kubeconfig`flag while running tests.
 Functional tests are run by the `go test` command.
 ```console
-$go test ./e2e/ -timeout=20m -v
+$go test ./e2e/ -timeout=20m -v -mod=vendor
 ```
 Functional tests can be invoked by `make` command


@@ -129,6 +129,8 @@ var _ = Describe("cephfs", func() {
 	AfterEach(func() {
 		if CurrentGinkgoTestDescription().Failed {
+			// log pods created by helm chart
+			logsCSIPods("app=ceph-csi-cephfs", c)
 			// log provisoner
 			logsCSIPods("app=csi-cephfsplugin-provisioner", c)
 			// log node plugin


@@ -19,6 +19,11 @@ var (
 )
 func deployVault(c kubernetes.Interface, deployTimeout int) {
+	// hack to make helm E2E pass as helm charts creates this configmap as part
+	// of cephcsi deployment
+	_, err := framework.RunKubectl("delete", "cm", "ceph-csi-encryption-kms-config", "--namespace", cephCSINamespace, "--ignore-not-found=true")
+	Expect(err).Should(BeNil())
 	createORDeleteVault("create")
 	opt := metav1.ListOptions{
 		LabelSelector: "app=vault",


@@ -131,6 +131,8 @@ var _ = Describe("RBD", func() {
 	AfterEach(func() {
 		if CurrentGinkgoTestDescription().Failed {
+			// log pods created by helm chart
+			logsCSIPods("app=ceph-csi-rbd", c)
 			// log provisoner
 			logsCSIPods("app=csi-rbdplugin-provisioner", c)
 			// log node plugin

@@ -382,7 +384,7 @@ var _ = Describe("RBD", func() {
 	}
 	// delete rbd nodeplugin pods
-	err = deletePodWithLabel("app=csi-rbdplugin")
+	err = deletePodWithLabel("app=csi-rbdplugin", cephCSINamespace, false)
 	if err != nil {
 		Fail(err.Error())
 	}


@@ -775,8 +775,8 @@ func validateEncryptedPVCAndAppBinding(pvcPath, appPath, kms string, f *framewor
 	}
 }
-func deletePodWithLabel(label string) error {
-	_, err := framework.RunKubectl("delete", "po", "-l", label)
+func deletePodWithLabel(label, ns string, skipNotFound bool) error {
+	_, err := framework.RunKubectl("delete", "po", "-l", label, fmt.Sprintf("--ignore-not-found=%t", skipNotFound), fmt.Sprintf("--namespace=%s", ns))
 	if err != nil {
 		e2elog.Logf("failed to delete pod %v", err)
 	}

scripts/install-helm.sh (new executable file, 193 lines)

@@ -0,0 +1,193 @@
#!/bin/bash -e
#Based on ideas from https://github.com/rook/rook/blob/master/tests/scripts/helm.sh
TEMP="/tmp/cephcsi-helm-test"
HELM="helm"
HELM_VERSION=${HELM_VERSION:-"v2.16.5"}
arch="${ARCH:-}"
CEPHFS_CHART_NAME="ceph-csi-cephfs"
RBD_CHART_NAME="ceph-csi-rbd"
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
DEPLOY_TIMEOUT=600
function check_deployment_status() {
LABEL=$1
NAMESPACE=$2
echo "Checking Deployment status for label $LABEL in Namespace $NAMESPACE"
for ((retry = 0; retry <= DEPLOY_TIMEOUT; retry = retry + 5)); do
total_replicas=$(kubectl get deployment -l "$LABEL" -n "$NAMESPACE" -o jsonpath='{.items[0].status.replicas}')
ready_replicas=$(kubectl get deployment -l "$LABEL" -n "$NAMESPACE" -o jsonpath='{.items[0].status.readyReplicas}')
if [ "$total_replicas" != "$ready_replicas" ]; then
echo "Total replicas $total_replicas is not equal to ready count $ready_replicas"
kubectl get deployment -l "$LABEL" -n "$NAMESPACE"
sleep 10
else
echo "Total replicas $total_replicas is equal to ready count $ready_replicas"
break
fi
done
if [ "$retry" -gt "$DEPLOY_TIMEOUT" ]; then
echo "[Timeout] Failed to get deployment"
exit 1
fi
}
function check_daemonset_status() {
LABEL=$1
NAMESPACE=$2
echo "Checking Daemonset status for label $LABEL in Namespace $NAMESPACE"
for ((retry = 0; retry <= DEPLOY_TIMEOUT; retry = retry + 5)); do
total_replicas=$(kubectl get daemonset -l "$LABEL" -n "$NAMESPACE" -o jsonpath='{.items[0].status.numberAvailable}')
ready_replicas=$(kubectl get daemonset -l "$LABEL" -n "$NAMESPACE" -o jsonpath='{.items[0].status.numberReady}')
if [ "$total_replicas" != "$ready_replicas" ]; then
echo "Total replicas $total_replicas is not equal to ready count $ready_replicas"
kubectl get daemonset -l "$LABEL" -n "$NAMESPACE"
sleep 10
else
echo "Total replicas $total_replicas is equal to ready count $ready_replicas"
break
fi
done
if [ "$retry" -gt "$DEPLOY_TIMEOUT" ]; then
echo "[Timeout] Failed to get daemonset"
exit 1
fi
}
detectArch() {
case "$(uname -m)" in
"x86_64" | "amd64")
arch="amd64"
;;
"aarch64")
arch="arm64"
;;
"i386")
arch="i386"
;;
*)
echo "Couldn't translate 'uname -m' output to an available arch."
echo "Try setting ARCH environment variable to your system arch:"
echo "amd64, x86_64. aarch64, i386"
exit 1
;;
esac
}
install() {
if ! helm_loc="$(type -p "helm")" || [[ -z ${helm_loc} ]]; then
# Download and unpack helm
local dist
dist="$(uname -s)"
mkdir -p ${TEMP}
# shellcheck disable=SC2021
dist=$(echo "${dist}" | tr "[A-Z]" "[a-z]")
wget "https://storage.googleapis.com/kubernetes-helm/helm-${HELM_VERSION}-${dist}-${arch}.tar.gz" -O "${TEMP}/helm.tar.gz"
tar -C "${TEMP}" -zxvf "${TEMP}/helm.tar.gz"
fi
# set up RBAC for helm
kubectl --namespace kube-system create sa tiller
kubectl create clusterrolebinding tiller --clusterrole cluster-admin --serviceaccount=kube-system:tiller
# Init helm
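# helm v2 renders the tiller manifest with the old extensions/v1beta1
# Deployment API; the sed calls below rewrite it to apps/v1 and add the
# selector that apps/v1 requires before applying it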
"${HELM}" init --service-account tiller --output yaml |
sed 's@apiVersion: extensions/v1beta1@apiVersion: apps/v1@' |
sed 's@strategy: {}@selector: {"matchLabels": {"app": "helm", "name": "tiller"}}@' | kubectl apply -f -
kubectl -n kube-system patch deploy/tiller-deploy -p '{"spec": {"template": {"spec": {"serviceAccountName": "tiller"}}}}'
sleep 5
helm_ready=$(kubectl get pods -l app=helm -n kube-system -o jsonpath='{.items[0].status.containerStatuses[0].ready}')
INC=0
until [[ "${helm_ready}" == "true" || $INC -gt 20 ]]; do
sleep 10
((++INC))
helm_ready=$(kubectl get pods -l app=helm -n kube-system -o jsonpath='{.items[0].status.containerStatuses[0].ready}')
echo "helm pod status: ${helm_ready}"
done
if [ "${helm_ready}" != "true" ]; then
echo "Helm init not successful"
kubectl get pods -l app=helm -n kube-system
kubectl logs -lapp=helm --all-containers=true -nkube-system
exit 1
fi
echo "Helm init successful"
}
install_cephcsi_helm_charts() {
NAMESPACE=$1
if [ -z "$NAMESPACE" ]; then
NAMESPACE="default"
fi
# install ceph-csi-cephfs and ceph-csi-rbd charts
"${HELM}" install "${SCRIPT_DIR}"/../charts/ceph-csi-cephfs --name ${CEPHFS_CHART_NAME} --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-cephfsplugin-provisioner --set nodeplugin.fullnameOverride=csi-cephfsplugin --set configMapName=ceph-csi-config --set provisioner.podSecurityPolicy.enabled=true --set nodeplugin.podSecurityPolicy.enabled=true
check_deployment_status app=ceph-csi-cephfs ${NAMESPACE}
check_daemonset_status app=ceph-csi-cephfs ${NAMESPACE}
# deleting configmap as a workaround to avoid configmap already present
# issue when installing ceph-csi-rbd
kubectl delete cm ceph-csi-config --namespace ${NAMESPACE}
"${HELM}" install "${SCRIPT_DIR}"/../charts/ceph-csi-rbd --name ${RBD_CHART_NAME} --namespace ${NAMESPACE} --set provisioner.fullnameOverride=csi-rbdplugin-provisioner --set nodeplugin.fullnameOverride=csi-rbdplugin --set configMapName=ceph-csi-config --set provisioner.podSecurityPolicy.enabled=true --set nodeplugin.podSecurityPolicy.enabled=true
check_deployment_status app=ceph-csi-rbd ${NAMESPACE}
check_daemonset_status app=ceph-csi-rbd ${NAMESPACE}
}
cleanup_cephcsi_helm_charts() {
"${HELM}" del --purge ${CEPHFS_CHART_NAME}
"${HELM}" del --purge ${RBD_CHART_NAME}
}
helm_reset() {
"${HELM}" reset
# shellcheck disable=SC2021
rm -rf "${TEMP}"
kubectl --namespace kube-system delete sa tiller
kubectl delete clusterrolebinding tiller
}
if [ -z "${arch}" ]; then
detectArch
fi
if ! helm_loc="$(type -p "helm")" || [[ -z ${helm_loc} ]]; then
dist="$(uname -s)"
# shellcheck disable=SC2021
dist=$(echo "${dist}" | tr "[A-Z]" "[a-z]")
HELM="${TEMP}/${dist}-${arch}/helm"
fi
case "${1:-}" in
up)
install
;;
clean)
helm_reset
;;
install-cephcsi)
install_cephcsi_helm_charts "$2"
;;
cleanup-cephcsi)
cleanup_cephcsi_helm_charts
;;
*)
echo "usage:" >&2
echo " $0 up" >&2
echo " $0 clean" >&2
echo " $0 install-cephcsi" >&2
echo " $0 cleanup-cephcsi" >&2
;;
esac


@@ -11,6 +11,6 @@ sudo scripts/minikube.sh cephcsi
 sudo scripts/minikube.sh k8s-sidecar
 sudo chown -R travis: "$HOME"/.minikube /usr/local/bin/kubectl
 # functional tests
-go test github.com/ceph/ceph-csi/e2e --deploy-timeout=10 -timeout=30m --cephcsi-namespace=cephcsi-e2e-$RANDOM -v
+go test github.com/ceph/ceph-csi/e2e --deploy-timeout=10 -timeout=30m --cephcsi-namespace=cephcsi-e2e-$RANDOM -v -mod=vendor
 sudo scripts/minikube.sh clean

scripts/travis-helmtest.sh (new executable file, 25 lines)

@@ -0,0 +1,25 @@
#!/bin/bash
set -e
# This script will be used by travis to run functional tests
# against different kubernetes versions
export KUBE_VERSION=$1
sudo scripts/minikube.sh up
sudo scripts/minikube.sh deploy-rook
# pull docker images to speed up e2e
sudo scripts/minikube.sh cephcsi
sudo scripts/minikube.sh k8s-sidecar
sudo chown -R travis: "$HOME"/.minikube /usr/local/bin/kubectl
NAMESPACE=cephcsi-e2e-$RANDOM
# set up helm
scripts/install-helm.sh up
# install cephcsi helm charts
scripts/install-helm.sh install-cephcsi ${NAMESPACE}
# functional tests
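# the drivers are already deployed by the helm charts installed above, so
# --deploy-cephfs/--deploy-rbd are disabled to keep the E2E suite from
# deploying its own manifests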
go test github.com/ceph/ceph-csi/e2e -mod=vendor --deploy-timeout=10 -timeout=30m --cephcsi-namespace=${NAMESPACE} --deploy-cephfs=false --deploy-rbd=false -v
# cleanup
scripts/install-helm.sh cleanup-cephcsi
scripts/install-helm.sh clean
sudo scripts/minikube.sh clean