mirror of
https://github.com/ceph/ceph-csi.git
synced 2024-11-09 16:00:22 +00:00
ci: skip snapshot E2E if kube<1.17+
The snapshot beta CRD won't work if the Kubernetes version is less than 1.17.0. As the snapshot CRD won't be installed, we cannot test snapshots, so disable the snapshot tests if the kube version is less than 1.17. Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
This commit is contained in:
parent
538b854853
commit
1f13692000
85
e2e/rbd.go
85
e2e/rbd.go
@ -234,52 +234,59 @@ var _ = Describe("RBD", func() {
|
||||
createRBDStorageClass(f.ClientSet, f, nil, nil)
|
||||
})
|
||||
|
||||
// skipping snapshot testing
|
||||
By("create a PVC clone and Bind it to an app", func() {
|
||||
createRBDSnapshotClass(f)
|
||||
pvc, err := loadPVC(pvcPath)
|
||||
v, err := f.ClientSet.Discovery().ServerVersion()
|
||||
if err != nil {
|
||||
e2elog.Logf("failed to get server version with error %v", err)
|
||||
Fail(err.Error())
|
||||
}
|
||||
// snapshot beta is only supported from v1.17+
|
||||
if v.Major > "1" || (v.Major == "1" && v.Minor >= "17") {
|
||||
createRBDSnapshotClass(f)
|
||||
pvc, err := loadPVC(pvcPath)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
}
|
||||
|
||||
pvc.Namespace = f.UniqueName
|
||||
e2elog.Logf("The PVC template %+v", pvc)
|
||||
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
}
|
||||
// validate created backend rbd images
|
||||
images := listRBDImages(f)
|
||||
if len(images) != 1 {
|
||||
e2elog.Logf("backend image count %d expected image count %d", len(images), 1)
|
||||
Fail("validate backend image failed")
|
||||
}
|
||||
snap := getSnapshot(snapshotPath)
|
||||
snap.Namespace = f.UniqueName
|
||||
snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
|
||||
err = createSnapshot(&snap, deployTimeout)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
}
|
||||
pool := "replicapool"
|
||||
snapList, err := listSnapshots(f, pool, images[0])
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
}
|
||||
if len(snapList) != 1 {
|
||||
e2elog.Logf("backend snapshot not matching kube snap count,snap count = % kube snap count %d", len(snapList), 1)
|
||||
Fail("validate backend snapshot failed")
|
||||
}
|
||||
pvc.Namespace = f.UniqueName
|
||||
e2elog.Logf("The PVC template %+v", pvc)
|
||||
err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
}
|
||||
// validate created backend rbd images
|
||||
images := listRBDImages(f)
|
||||
if len(images) != 1 {
|
||||
e2elog.Logf("backend image count %d expected image count %d", len(images), 1)
|
||||
Fail("validate backend image failed")
|
||||
}
|
||||
snap := getSnapshot(snapshotPath)
|
||||
snap.Namespace = f.UniqueName
|
||||
snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
|
||||
err = createSnapshot(&snap, deployTimeout)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
}
|
||||
pool := "replicapool"
|
||||
snapList, err := listSnapshots(f, pool, images[0])
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
}
|
||||
if len(snapList) != 1 {
|
||||
e2elog.Logf("backend snapshot not matching kube snap count,snap count = % kube snap count %d", len(snapList), 1)
|
||||
Fail("validate backend snapshot failed")
|
||||
}
|
||||
|
||||
validatePVCAndAppBinding(pvcClonePath, appClonePath, f)
|
||||
validatePVCAndAppBinding(pvcClonePath, appClonePath, f)
|
||||
|
||||
err = deleteSnapshot(&snap, deployTimeout)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
}
|
||||
err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
err = deleteSnapshot(&snap, deployTimeout)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
}
|
||||
err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
}
|
||||
}
|
||||
})
|
||||
|
||||
|
@ -4,19 +4,35 @@ set -e
|
||||
# This script will be used by travis to run functional test
|
||||
# against different kubernetes versions
|
||||
export KUBE_VERSION=$1
|
||||
|
||||
# parse the kubernetes version, return the digit passed as argument
|
||||
# v1.17.0 -> kube_version 1 -> 1
|
||||
# v1.17.0 -> kube_version 2 -> 17
|
||||
kube_version() {
|
||||
echo "${KUBE_VERSION}" | sed 's/^v//' | cut -d'.' -f"${1}"
|
||||
}
|
||||
sudo scripts/minikube.sh up
|
||||
sudo scripts/minikube.sh deploy-rook
|
||||
sudo scripts/minikube.sh create-block-pool
|
||||
# pull docker images to speed up e2e
|
||||
sudo scripts/minikube.sh cephcsi
|
||||
sudo scripts/minikube.sh k8s-sidecar
|
||||
# delete snapshot CRD created by ceph-csi in rook
|
||||
scripts/install-snapshot.sh delete-crd
|
||||
# install snapshot controller
|
||||
scripts/install-snapshot.sh install
|
||||
sudo chown -R travis: "$HOME"/.minikube /usr/local/bin/kubectl
|
||||
KUBE_MAJOR=$(kube_version 1)
|
||||
KUBE_MINOR=$(kube_version 2)
|
||||
# skip snapshot operation if kube version is less than 1.17.0
|
||||
if [[ "${KUBE_MAJOR}" -ge 1 ]] && [[ "${KUBE_MINOR}" -ge 17 ]]; then
|
||||
# delete snapshot CRD created by ceph-csi in rook
|
||||
scripts/install-snapshot.sh delete-crd
|
||||
# install snapshot controller
|
||||
scripts/install-snapshot.sh install
|
||||
fi
|
||||
|
||||
# functional tests
|
||||
go test github.com/ceph/ceph-csi/e2e --deploy-timeout=10 -timeout=30m --cephcsi-namespace=cephcsi-e2e-$RANDOM -v -mod=vendor
|
||||
|
||||
scripts/install-snapshot.sh cleanup
|
||||
if [[ "${KUBE_MAJOR}" -ge 1 ]] && [[ "${KUBE_MINOR}" -ge 17 ]]; then
|
||||
# delete snapshot CRD
|
||||
scripts/install-snapshot.sh cleanup
|
||||
fi
|
||||
sudo scripts/minikube.sh clean
|
||||
|
@ -4,6 +4,12 @@ set -e
|
||||
# This script will be used by travis to run functional test
|
||||
# against different kubernetes versions
|
||||
export KUBE_VERSION=$1
|
||||
# parse the kubernetes version, return the digit passed as argument
|
||||
# v1.17.0 -> kube_version 1 -> 1
|
||||
# v1.17.0 -> kube_version 2 -> 17
|
||||
kube_version() {
|
||||
echo "${KUBE_VERSION}" | sed 's/^v//' | cut -d'.' -f"${1}"
|
||||
}
|
||||
sudo scripts/minikube.sh up
|
||||
sudo scripts/minikube.sh deploy-rook
|
||||
sudo scripts/minikube.sh create-block-pool
|
||||
@ -15,10 +21,15 @@ sudo chown -R travis: "$HOME"/.minikube /usr/local/bin/kubectl
|
||||
NAMESPACE=cephcsi-e2e-$RANDOM
|
||||
# create ns for e2e
|
||||
kubectl create ns ${NAMESPACE}
|
||||
# delete snapshot CRD created by ceph-csi in rook
|
||||
scripts/install-snapshot.sh delete-crd
|
||||
# install snapshot controller
|
||||
scripts/install-snapshot.sh install
|
||||
KUBE_MAJOR=$(kube_version 1)
|
||||
KUBE_MINOR=$(kube_version 2)
|
||||
# skip snapshot operation if kube version is less than 1.17.0
|
||||
if [[ "${KUBE_MAJOR}" -ge 1 ]] && [[ "${KUBE_MINOR}" -ge 17 ]]; then
|
||||
# delete snapshot CRD created by ceph-csi in rook
|
||||
scripts/install-snapshot.sh delete-crd
|
||||
# install snapshot controller
|
||||
scripts/install-snapshot.sh install
|
||||
fi
|
||||
# set up helm
|
||||
scripts/install-helm.sh up
|
||||
# install cephcsi helm charts
|
||||
@ -27,7 +38,11 @@ scripts/install-helm.sh install-cephcsi ${NAMESPACE}
|
||||
go test github.com/ceph/ceph-csi/e2e -mod=vendor --deploy-timeout=10 -timeout=30m --cephcsi-namespace=${NAMESPACE} --deploy-cephfs=false --deploy-rbd=false -v
|
||||
|
||||
#cleanup
|
||||
scripts/install-snapshot.sh cleanup
|
||||
# skip snapshot operation if kube version is less than 1.17.0
|
||||
if [[ "${KUBE_MAJOR}" -ge 1 ]] && [[ "${KUBE_MINOR}" -ge 17 ]]; then
|
||||
# delete snapshot CRD
|
||||
scripts/install-snapshot.sh cleanup
|
||||
fi
|
||||
scripts/install-helm.sh cleanup-cephcsi ${NAMESPACE}
|
||||
scripts/install-helm.sh clean
|
||||
kubectl delete ns ${NAMESPACE}
|
||||
|
Loading…
Reference in New Issue
Block a user