From 4fcd649fa720b2469242ecdc7203473e1dae3739 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Wed, 28 Oct 2020 13:13:52 +0530 Subject: [PATCH] e2e: add testing for controller This commit adds E2E testing to verify the metadata created by the controller. We are not checking the generated omap data, but we will verify PVC resize and binding of the PVC to an application. Signed-off-by: Madhu Rajanna --- e2e/pvc.go | 67 ++++++++++++++++++++++++++ e2e/rbd.go | 17 +++++++ e2e/rbd_helper.go | 36 ++++++++++++++ e2e/utils.go | 120 ++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 240 insertions(+) diff --git a/e2e/pvc.go b/e2e/pvc.go index 5341b776d..8a40e800e 100644 --- a/e2e/pvc.go +++ b/e2e/pvc.go @@ -74,6 +74,73 @@ func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClai }) } +func createPVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume) error { + _, err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) + if err != nil { + return err + } + _, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) + return err +} + +func deletePVCAndPV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, t int) error { + err := c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{}) + if err != nil { + return err + } + err = c.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{}) + if err != nil { + return err + } + + timeout := time.Duration(t) * time.Minute + start := time.Now() + err = wait.PollImmediate(poll, timeout, func() (bool, error) { + // Check that the PVC is deleted. 
+ e2elog.Logf("waiting for PVC %s in state %s to be deleted (%d seconds elapsed)", pvc.Name, pvc.Status.String(), int(time.Since(start).Seconds())) + _, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) + if err == nil { + return false, nil + } + if !apierrs.IsNotFound(err) { + return false, fmt.Errorf("get on deleted PVC %v failed with error other than \"not found\": %w", pvc.Name, err) + } + + return true, nil + }) + if err != nil { + return err + } + start = time.Now() + return wait.PollImmediate(poll, timeout, func() (bool, error) { + // Check that the PV is deleted. + e2elog.Logf("waiting for PV %s in state %s to be deleted (%d seconds elapsed)", pv.Name, pv.Status.String(), int(time.Since(start).Seconds())) + + _, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) + if err == nil { + return false, nil + } + + if !apierrs.IsNotFound(err) { + return false, fmt.Errorf("get on deleted PV %v failed with error other than \"not found\": %w", pv.Name, err) + } + + return true, nil + }) +} + +func getPVCAndPV(c kubernetes.Interface, pvcName, pvcNamespace string) (*v1.PersistentVolumeClaim, *v1.PersistentVolume, error) { + pvc, err := c.CoreV1().PersistentVolumeClaims(pvcNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{}) + if err != nil { + return nil, nil, fmt.Errorf("failed to get PVC with error %v", err) + } + pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) + if err != nil { + return pvc, nil, fmt.Errorf("failed to get PV with error %v", err) + } + return pvc, pv, nil +} + func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, t int) error { timeout := time.Duration(t) * time.Minute nameSpace := pvc.Namespace diff --git a/e2e/rbd.go b/e2e/rbd.go index 07c316b9d..a037afd3b 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -1241,6 +1241,23 @@ var _ = Describe("RBD", func() { } }) + By("validate 
the functionality of controller", func() { + err := deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = validateController(f, pvcPath, appPath, rbdExamplePath+"storageclass.yaml") + if err != nil { + e2elog.Failf("failed to validate controller with error %v", err) + } + // validate created backend rbd images + validateRBDImageCount(f, 0) + err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } + + }) // Make sure this should be last testcase in this file, because // it deletes pool By("Create a PVC and delete PVC when backend pool deleted", func() { diff --git a/e2e/rbd_helper.go b/e2e/rbd_helper.go index a8b20202c..5cdd149e1 100644 --- a/e2e/rbd_helper.go +++ b/e2e/rbd_helper.go @@ -389,3 +389,39 @@ func checkPVCCSIJournalInPool(f *framework.Framework, pvc *v1.PersistentVolumeCl return nil } + +func deletePVCImageJournalInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error { + imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f) + if err != nil { + return err + } + + _, stdErr, err := execCommandInToolBoxPod(f, + fmt.Sprintf("rados rm %s csi.volume.%s", rbdOptions(pool), imageData.imageID), rookNamespace) + if err != nil { + return err + } + if stdErr != "" { + return fmt.Errorf("failed to remove omap %s csi.volume.%s with error %v", rbdOptions(pool), imageData.imageID, stdErr) + } + + return nil +} + +func deletePVCCSIJournalInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error { + imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f) + if err != nil { + return err + } + + _, stdErr, err := execCommandInToolBoxPod(f, + fmt.Sprintf("rados rmomapkey %s csi.volumes.default csi.volume.%s", rbdOptions(pool), imageData.pvName), rookNamespace) + if err != nil { + return err + } + if stdErr != "" 
{ + return fmt.Errorf("failed to remove %s csi.volumes.default csi.volume.%s with error %v", rbdOptions(pool), imageData.pvName, stdErr) + } + + return nil +} diff --git a/e2e/utils.go b/e2e/utils.go index ddb730e72..0e72f29d4 100644 --- a/e2e/utils.go +++ b/e2e/utils.go @@ -14,6 +14,7 @@ import ( v1 "k8s.io/api/core/v1" scv1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" utilyaml "k8s.io/apimachinery/pkg/util/yaml" @@ -39,6 +40,7 @@ const ( rbdmountOptions = "mountOptions" retainPolicy = v1.PersistentVolumeReclaimRetain + // deletePolicy is the default policy in E2E. deletePolicy = v1.PersistentVolumeReclaimDelete ) @@ -591,6 +593,124 @@ func validatePVCClone(sourcePvcPath, clonePvcPath, clonePvcAppPath string, f *fr validateRBDImageCount(f, 0) } +// validateController simulates the required operations to validate the +// controller. +// Controller will generate the omap data when the PV is created. +// For that we need to do the below operations: +// Create PVC with Retain policy +// Store the PVC and PV kubernetes objects so that we can create static +// binding between PVC-PV +// Delete the omap data created for PVC +// Create the static PVC and PV and let controller regenerate the omap +// Mount the PVC to application (NodeStage/NodePublish should work) +// Resize the PVC +// Delete the Application and PVC. 
+func validateController(f *framework.Framework, pvcPath, appPath, scPath string) error { + size := "1Gi" + poolName := defaultRBDPool + expandSize := "10Gi" + var err error + // create storageclass with retain + err = createRBDStorageClass(f.ClientSet, f, nil, nil, retainPolicy) + if err != nil { + return fmt.Errorf("failed to create storageclass with error %v", err) + } + + // create pvc + pvc, err := loadPVC(pvcPath) + if err != nil { + return fmt.Errorf("failed to load PVC with error %v", err) + } + resizePvc, err := loadPVC(pvcPath) + if err != nil { + return fmt.Errorf("failed to load PVC with error %v", err) + } + resizePvc.Namespace = f.UniqueName + + pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(size) + pvc.Namespace = f.UniqueName + err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) + if err != nil { + return fmt.Errorf("failed to create PVC with error %v", err) + } + // get pvc and pv object + pvc, pv, err := getPVCAndPV(f.ClientSet, pvc.Name, pvc.Namespace) + if err != nil { + return fmt.Errorf("failed to get PVC with error %v", err) + } + // Recreate storageclass with delete policy + err = deleteResource(scPath) + if err != nil { + return fmt.Errorf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, nil, nil, deletePolicy) + if err != nil { + return fmt.Errorf("failed to create storageclass with error %v", err) + } + // delete omap data + err = deletePVCImageJournalInPool(f, pvc, poolName) + if err != nil { + return err + } + err = deletePVCCSIJournalInPool(f, pvc, poolName) + if err != nil { + return err + } + // delete pvc and pv + err = deletePVCAndPV(f.ClientSet, pvc, pv, deployTimeout) + if err != nil { + return fmt.Errorf("failed to delete PVC or PV with error %v", err) + } + // create pvc and pv with application + pv.Spec.ClaimRef = nil + pv.Spec.PersistentVolumeReclaimPolicy = deletePolicy + // unset the resource version as should not be set on objects to be 
created + pvc.ResourceVersion = "" + pv.ResourceVersion = "" + err = createPVCAndPV(f.ClientSet, pvc, pv) + if err != nil { + return fmt.Errorf("failed to create PVC or PV with error %v", err) + } + // bind PVC to application + app, err := loadApp(appPath) + if err != nil { + return err + } + app.Labels = map[string]string{"app": "resize-pvc"} + app.Namespace = f.UniqueName + opt := metav1.ListOptions{ + LabelSelector: "app=resize-pvc", + } + err = createApp(f.ClientSet, app, deployTimeout) + if err != nil { + return err + } + // resize PVC + err = expandPVCSize(f.ClientSet, resizePvc, expandSize, deployTimeout) + if err != nil { + return err + } + if *pvc.Spec.VolumeMode == v1.PersistentVolumeFilesystem { + err = checkDirSize(app, f, &opt, expandSize) + if err != nil { + return err + } + } + + if *pvc.Spec.VolumeMode == v1.PersistentVolumeBlock { + err = checkDeviceSize(app, f, &opt, expandSize) + if err != nil { + return err + } + } + // delete pvc and storageclass + err = deletePVCAndApp("", f, resizePvc, app) + if err != nil { + return err + } + return deleteResource(rbdExamplePath + "storageclass.yaml") +} + // k8sVersionGreaterEquals checks the ServerVersion of the Kubernetes cluster // and compares it to the major.minor version passed. In case the version of // the cluster is equal or higher to major.minor, `true` is returned, `false`