From 90da7961c5f61e2ef8ac0cb2460a7362f57be4f0 Mon Sep 17 00:00:00 2001 From: Madhu Rajanna Date: Thu, 3 Sep 2020 15:04:29 +0530 Subject: [PATCH] e2e: rework on E2E framework rework of E2E framework for better code organization and add more helpful logs for debugging. Signed-off-by: Madhu Rajanna (cherry picked from commit b4693dcffeb4cb6ea69ed2d29bbea0bc1af094ec) --- e2e/cephfs.go | 1041 +++++++++++++++++++++-------------------- e2e/cephfs_helper.go | 131 ++++++ e2e/configmap.go | 120 +++++ e2e/namespace.go | 75 +++ e2e/node.go | 44 ++ e2e/pod.go | 212 +++++++++ e2e/pvc.go | 171 +++++++ e2e/rbd.go | 830 ++++++++++++++++---------------- e2e/rbd_helper.go | 390 +++++++++++++++ e2e/resize.go | 6 +- e2e/snapshot.go | 34 +- e2e/staticpvc.go | 62 ++- e2e/upgrade-cephfs.go | 74 +-- e2e/upgrade-rbd.go | 91 ++-- e2e/utils.go | 1020 +++------------------------------------- 15 files changed, 2344 insertions(+), 1957 deletions(-) create mode 100644 e2e/cephfs_helper.go create mode 100644 e2e/configmap.go create mode 100644 e2e/namespace.go create mode 100644 e2e/node.go create mode 100644 e2e/pod.go create mode 100644 e2e/pvc.go create mode 100644 e2e/rbd_helper.go diff --git a/e2e/cephfs.go b/e2e/cephfs.go index d761bca40..c152c487d 100644 --- a/e2e/cephfs.go +++ b/e2e/cephfs.go @@ -34,21 +34,21 @@ func deployCephfsPlugin() { data, err := replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerRBAC) if err != nil { - e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsProvisionerRBAC, err) + e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, "--ignore-not-found=true", ns, "delete", "-f", "-") if err != nil { - e2elog.Logf("failed to delete provisioner rbac %s %v", cephfsDirPath+cephfsProvisionerRBAC, err) + e2elog.Failf("failed to delete provisioner rbac %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, err) } data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginRBAC) if err != nil { - e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsNodePluginRBAC, err) + e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, "delete", "--ignore-not-found=true", ns, "-f", "-") if err != nil { - e2elog.Logf("failed to delete nodeplugin rbac %s %v", cephfsDirPath+cephfsNodePluginRBAC, err) + e2elog.Failf("failed to delete nodeplugin rbac %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err) } createORDeleteCephfsResouces("create") @@ -61,63 +61,73 @@ func deleteCephfsPlugin() { func createORDeleteCephfsResouces(action string) { data, err := replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisioner) if err != nil { - e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsProvisioner, err) + e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisioner, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-") if err != nil { - e2elog.Logf("failed to %s cephfs provisioner %v", action, err) + e2elog.Failf("failed to %s CephFS provisioner with error %v", action, err) } - data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerRBAC) + if err != nil { - e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsProvisionerRBAC, err) + e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, 
err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-") if err != nil { - e2elog.Logf("failed to %s cephfs provisioner rbac %v", action, err) + e2elog.Failf("failed to %s CephFS provisioner rbac with error %v", action, err) } data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerPSP) if err != nil { - e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsProvisionerPSP, err) + e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerPSP, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-") if err != nil { - e2elog.Logf("failed to %s cephfs provisioner psp %v", action, err) + e2elog.Failf("failed to %s CephFS provisioner psp with error %v", action, err) } data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePlugin) if err != nil { - e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsNodePlugin, err) + e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePlugin, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-") if err != nil { - e2elog.Logf("failed to %s cephfs nodeplugin %v", action, err) + e2elog.Failf("failed to %s CephFS nodeplugin with error %v", action, err) } data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginRBAC) if err != nil { - e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsNodePluginRBAC, err) + e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-") if err != nil { - e2elog.Logf("failed to %s cephfs nodeplugin rbac %v", action, err) + e2elog.Failf("failed to %s CephFS nodeplugin rbac with error %v", action, err) } data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginPSP) if err != nil { - e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsNodePluginPSP, err) + e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginPSP, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-") if err != nil { - e2elog.Logf("failed to %s cephfs nodeplugin psp %v", action, err) + e2elog.Failf("failed to %s CephFS nodeplugin psp with error %v", action, err) + } +} + +func validateSubvolumeCount(f *framework.Framework, count int, fileSystemName, subvolumegroup string) { + subVol, err := listCephFSSubVolumes(f, fileSystemName, subvolumegroup) + if err != nil { + e2elog.Failf("failed to list CephFS subvolumes with error %v", err) + } + if len(subVol) != count { + e2elog.Failf("subvolumes [%v]. 
subvolume count %d not matching expected count %v", subVol, len(subVol), count) } } var _ = Describe("cephfs", func() { f := framework.NewDefaultFramework("cephfs") var c clientset.Interface - // deploy cephfs CSI + // deploy CephFS CSI BeforeEach(func() { if !testCephFS || upgradeTesting { Skip("Skipping CephFS E2E") @@ -127,13 +137,19 @@ var _ = Describe("cephfs", func() { if cephCSINamespace != defaultNs { err := createNamespace(c, cephCSINamespace) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create namespace %s with error %v", cephCSINamespace, err) } } deployCephfsPlugin() } - createConfigMap(cephfsDirPath, f.ClientSet, f) - createCephfsSecret(f.ClientSet, f) + err := createConfigMap(cephfsDirPath, f.ClientSet, f) + if err != nil { + e2elog.Failf("failed to create configmap with error %v", err) + } + err = createCephfsSecret(f.ClientSet, f) + if err != nil { + e2elog.Failf("failed to create secret with error %v", err) + } }) AfterEach(func() { @@ -148,22 +164,31 @@ var _ = Describe("cephfs", func() { // log node plugin logsCSIPods("app=csi-cephfsplugin", c) } - deleteConfigMap(cephfsDirPath) - deleteResource(cephfsExamplePath + "secret.yaml") - deleteResource(cephfsExamplePath + "storageclass.yaml") + err := deleteConfigMap(cephfsDirPath) + if err != nil { + e2elog.Failf("failed to delete configmap with error %v", err) + } + err = deleteResource(cephfsExamplePath + "secret.yaml") + if err != nil { + e2elog.Failf("failed to delete secret with error %v", err) + } + err = deleteResource(cephfsExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } if deployCephFS { deleteCephfsPlugin() if cephCSINamespace != defaultNs { err := deleteNamespace(c, cephCSINamespace) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete namespace %s with error %v", cephCSINamespace, err) } } } }) - Context("Test cephfs CSI", func() { - It("Test cephfs CSI", func() { + Context("Test CephFS CSI", func() { + It("Test CephFS CSI", func() { pvcPath := cephfsExamplePath + "pvc.yaml" appPath := cephfsExamplePath + "pod.yaml" pvcClonePath := cephfsExamplePath + "pvc-restore.yaml" @@ -172,549 +197,543 @@ var _ = Describe("cephfs", func() { appSmartClonePath := cephfsExamplePath + "pod-clone.yaml" snapshotPath := cephfsExamplePath + "snapshot.yaml" - By("checking provisioner deployment is running") - var err error - err = waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout) - if err != nil { - Fail(err.Error()) - } + By("checking provisioner deployment is running", func() { + err := waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout) + if err != nil { + e2elog.Failf("timeout waiting for deployment %s with error %v", cephfsDeploymentName, err) + } + }) - By("checking nodeplugin deamonsets is running") - err = waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout) - if err != nil { - Fail(err.Error()) - } + By("checking nodeplugin deamonset pods are running", func() { + err := waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout) + if err != nil { + e2elog.Failf("timeout waiting for daemonset %s with error %v", cephfsDeamonSetName, err) + } + }) By("check static PVC", func() { scPath := cephfsExamplePath + "secret.yaml" err := validateCephFsStaticPV(f, appPath, scPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to validate CephFS static pv with error %v", err) } 
}) - By("create a storage class with pool and a PVC then Bind it to an app", func() { - createCephfsStorageClass(f.ClientSet, f, true, "") - validatePVCAndAppBinding(pvcPath, appPath, f) - deleteResource(cephfsExamplePath + "storageclass.yaml") + By("create a storageclass with pool and a PVC then bind it to an app", func() { + err := createCephfsStorageClass(f.ClientSet, f, true, "") + if err != nil { + e2elog.Failf("failed to create CephFS storageclass with error %v", err) + } + err = validatePVCAndAppBinding(pvcPath, appPath, f) + if err != nil { + e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err) + } + err = deleteResource(cephfsExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete CephFS storageclass with error %v", err) + } }) - createCephfsStorageClass(f.ClientSet, f, false, "") + By("create a PVC and bind it to an app", func() { + err := createCephfsStorageClass(f.ClientSet, f, false, "") + if err != nil { + e2elog.Failf("failed to create CephFS storageclass with error %v", err) + } + err = validatePVCAndAppBinding(pvcPath, appPath, f) + if err != nil { + e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err) + } + }) - By("create and delete a PVC", func() { - By("create a PVC and Bind it to an app", func() { - validatePVCAndAppBinding(pvcPath, appPath, f) + By("create a PVC and bind it to an app with normal user", func() { + err := validateNormalUserPVCAccess(pvcPath, f) + if err != nil { + e2elog.Failf("failed to validate normal user CephFS pvc and application binding with error %v", err) + } + }) - }) + By("create/delete multiple PVCs and Apps", func() { + totalCount := 2 + pvc, err := loadPVC(pvcPath) + if err != nil { + e2elog.Failf("failed to load PVC with error %v", err) + } + pvc.Namespace = f.UniqueName - By("create a PVC and Bind it to an app with normal user", func() { - validateNormalUserPVCAccess(pvcPath, f) - }) + app, err := loadApp(appPath) + if err != nil { + e2elog.Failf("failed to load application with error %v", err) + } + app.Namespace = f.UniqueName + // create PVC and app + for i := 0; i < totalCount; i++ { + name := fmt.Sprintf("%s%d", f.UniqueName, i) + err = createPVCAndApp(name, f, pvc, app, deployTimeout) + if err != nil { + e2elog.Failf("failed to create PVC or application with error %v", err) + } - By("create/delete multiple PVCs and Apps", func() { - totalCount := 2 + } + + validateSubvolumeCount(f, totalCount, fileSystemName, subvolumegroup) + // delete PVC and app + for i := 0; i < totalCount; i++ { + name := fmt.Sprintf("%s%d", f.UniqueName, i) + err = deletePVCAndApp(name, f, pvc, app) + if err != nil { + e2elog.Failf("failed to delete PVC or application with error %v", err) + } + + } + validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup) + }) + + By("check data persist after recreating pod", func() { + err := checkDataPersist(pvcPath, appPath, f) + if err != nil { + e2elog.Failf("failed to check data persist in pvc with error %v", err) + } + }) + + By("create PVC, delete backing subvolume and check pv deletion", func() { + pvc, err := loadPVC(pvcPath) + if pvc == nil { + e2elog.Failf("failed to load PVC with error %v", err) + } + pvc.Namespace = f.UniqueName + + err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) + if err != nil { + e2elog.Failf("failed to create PVC with error %v", err) + } + + err = deleteBackingCephFSVolume(f, pvc) + if err != nil { + e2elog.Failf("failed to delete CephFS subvolume with error %v", err) + } 
+ + err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) + if err != nil { + e2elog.Failf("failed to delete PVC with error %v", err) + } + }) + + By("validate multiple subvolumegroup creation", func() { + err := deleteResource(cephfsExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + // re-define configmap with information of multiple clusters. + subvolgrpInfo := map[string]string{ + "clusterID-1": "subvolgrp1", + "clusterID-2": "subvolgrp2", + } + err = createCustomConfigMap(f.ClientSet, cephfsDirPath, subvolgrpInfo) + if err != nil { + e2elog.Failf("failed to create configmap with error %v", err) + } + err = createCephfsStorageClass(f.ClientSet, f, false, "clusterID-1") + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } + err = validatePVCAndAppBinding(pvcPath, appPath, f) + if err != nil { + e2elog.Failf("failed to validate pvc and application with error %v", err) + } + err = deleteResource(cephfsExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + // verify subvolumegroup creation. + err = validateSubvolumegroup(f, "subvolgrp1") + if err != nil { + e2elog.Failf("failed to validate subvolume group with error %v", err) + } + + // create resources and verify subvolume group creation + // for the second cluster. + err = createCephfsStorageClass(f.ClientSet, f, false, "clusterID-2") + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } + err = validatePVCAndAppBinding(pvcPath, appPath, f) + if err != nil { + e2elog.Failf("failed to validate pvc and application with error %v", err) + } + err = deleteResource(cephfsExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = validateSubvolumegroup(f, "subvolgrp2") + if err != nil { + e2elog.Failf("failed to validate subvolume group with error %v", err) + } + err = deleteConfigMap(cephfsDirPath) + if err != nil { + e2elog.Failf("failed to delete configmap with error %v", err) + } + err = createConfigMap(cephfsDirPath, f.ClientSet, f) + if err != nil { + e2elog.Failf("failed to create configmap with error %v", err) + } + err = createCephfsStorageClass(f.ClientSet, f, false, "") + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } + }) + + By("Resize PVC and check application directory size", func() { + v, err := f.ClientSet.Discovery().ServerVersion() + if err != nil { + e2elog.Failf("failed to get server version with error with error %v", err) + } + + // Resize 0.3.0 is only supported from v1.15+ + if v.Major > "1" || (v.Major == "1" && v.Minor >= "15") { + err := resizePVCAndValidateSize(pvcPath, appPath, f) + if err != nil { + e2elog.Failf("failed to resize PVC with error %v", err) + } + } + }) + + By("Mount pvc as readonly in pod", func() { + // create PVC and bind it to an app + pvc, err := loadPVC(pvcPath) + if err != nil { + e2elog.Failf("failed to load PVC with error %v", err) + } + pvc.Namespace = f.UniqueName + + app, err := loadApp(appPath) + if err != nil { + e2elog.Failf("failed to load application with error %v", err) + } + + app.Namespace = f.UniqueName + label := map[string]string{ + "app": app.Name, + } + app.Labels = label + app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name + app.Spec.Volumes[0].PersistentVolumeClaim.ReadOnly = true + err = createPVCAndApp("", f, pvc, app, 
deployTimeout)
+			if err != nil {
+				e2elog.Failf("failed to create PVC or application with error %v", err)
+			}
+
+			opt := metav1.ListOptions{
+				LabelSelector: fmt.Sprintf("app=%s", app.Name),
+			}
+
+			filePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
+			_, stdErr := execCommandInPodAndAllowFail(f, fmt.Sprintf("echo 'Hello World' > %s", filePath), app.Namespace, &opt)
+			readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath)
+			if !strings.Contains(stdErr, readOnlyErr) {
+				e2elog.Failf(stdErr)
+			}
+
+			// delete PVC and app
+			err = deletePVCAndApp("", f, pvc, app)
+			if err != nil {
+				e2elog.Failf("failed to delete PVC or application with error %v", err)
+			}
+		})
+
+		By("create a PVC clone and bind it to an app", func() {
+			v, err := f.ClientSet.Discovery().ServerVersion()
+			if err != nil {
+				e2elog.Failf("failed to get server version with error %v", err)
+			}
+			// snapshot beta is only supported from v1.17+
+			if v.Major > "1" || (v.Major == "1" && v.Minor >= "17") {
+				var wg sync.WaitGroup
+				totalCount := 3
+				// totalSubvolumes represents the subvolumes in backend
+				// always totalCount+parentPVC
+				totalSubvolumes := totalCount + 1
+				wg.Add(totalCount)
+				err = createCephFSSnapshotClass(f)
+				if err != nil {
+					e2elog.Failf("failed to create CephFS snapshotclass with error %v", err)
+				}
 			pvc, err := loadPVC(pvcPath)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to load PVC with error %v", err)
 			}
+
 			pvc.Namespace = f.UniqueName
-
-			app, err := loadApp(appPath)
-			if err != nil {
-				Fail(err.Error())
-			}
-			app.Namespace = f.UniqueName
-			// create pvc and app
-			for i := 0; i < totalCount; i++ {
-				name := fmt.Sprintf("%s%d", f.UniqueName, i)
-				err := createPVCAndApp(name, f, pvc, app, deployTimeout)
-				if err != nil {
-					Fail(err.Error())
-				}
-
-			}
-			subVol := listCephFSSubVolumes(f, fileSystemName, subvolumegroup)
-			if len(subVol) != totalCount {
-				msg := fmt.Sprintf("subvolumes %v. subvolume count %d not matching expected count %v", subVol, len(subVol), totalCount)
-				e2elog.Logf(msg)
-				Fail(msg)
-			}
-			// delete pvc and app
-			for i := 0; i < totalCount; i++ {
-				name := fmt.Sprintf("%s%d", f.UniqueName, i)
-				err := deletePVCAndApp(name, f, pvc, app)
-				if err != nil {
-					Fail(err.Error())
-				}
-
-			}
-			subVol = listCephFSSubVolumes(f, fileSystemName, subvolumegroup)
-			if len(subVol) != 0 {
-				msg := fmt.Sprintf("subvolumes %v. subvolume count %d not matching expected count %v", subVol, len(subVol), 0)
-				e2elog.Logf(msg)
-				Fail(msg)
-			}
-		})
-
-		By("check data persist after recreating pod with same pvc", func() {
-			err := checkDataPersist(pvcPath, appPath, f)
-			if err != nil {
-				Fail(err.Error())
-			}
-		})
-
-		By("creating a PVC, deleting backing subvolume, and checking successful PV deletion", func() {
-			pvc, err := loadPVC(pvcPath)
-			if pvc == nil {
-				Fail(err.Error())
-			}
-			pvc.Namespace = f.UniqueName
-
 			err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to create PVC with error %v", err)
 			}
-			err = deleteBackingCephFSVolume(f, pvc)
-			if err != nil {
-				Fail(err.Error())
-			}
-
-			err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
-			if err != nil {
-				Fail(err.Error())
-			}
-		})
-
-		By("validate multiple subvolumegroup creation", func() {
-			deleteResource(cephfsExamplePath + "storageclass.yaml")
-			// re-define configmap with information of multiple clusters.
- subvolgrpInfo := map[string]string{ - "clusterID-1": "subvolgrp1", - "clusterID-2": "subvolgrp2", - } - createCustomConfigMap(f.ClientSet, cephfsDirPath, subvolgrpInfo) - createCephfsStorageClass(f.ClientSet, f, false, "clusterID-1") - validatePVCAndAppBinding(pvcPath, appPath, f) - deleteResource(cephfsExamplePath + "storageclass.yaml") - // verify subvolumegroup creation. - err := validateSubvolumegroup(f, "subvolgrp1") - if err != nil { - Fail(err.Error()) - } - - // create resources and verify subvolume group creation - // for the second cluster. - createCephfsStorageClass(f.ClientSet, f, false, "clusterID-2") - validatePVCAndAppBinding(pvcPath, appPath, f) - deleteResource(cephfsExamplePath + "storageclass.yaml") - err = validateSubvolumegroup(f, "subvolgrp2") - if err != nil { - Fail(err.Error()) - } - deleteConfigMap(cephfsDirPath) - }) - - createConfigMap(cephfsDirPath, f.ClientSet, f) - createCephfsStorageClass(f.ClientSet, f, false, "") - - By("Resize PVC and check application directory size", func() { - v, err := f.ClientSet.Discovery().ServerVersion() - if err != nil { - e2elog.Logf("failed to get server version with error %v", err) - Fail(err.Error()) - } - - // Resize 0.3.0 is only supported from v1.15+ - if v.Major > "1" || (v.Major == "1" && v.Minor >= "15") { - err := resizePVCAndValidateSize(pvcPath, appPath, f) - if err != nil { - e2elog.Logf("failed to resize PVC %v", err) - Fail(err.Error()) - } - } - - }) - - By("Mount pvc as readonly in pod", func() { - // create pvc and bind it to an app - pvc, err := loadPVC(pvcPath) - if err != nil { - Fail(err.Error()) - } - - pvc.Namespace = f.UniqueName - app, err := loadApp(appPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load application with error %v", err) } app.Namespace = f.UniqueName - label := map[string]string{ - "app": app.Name, - } - app.Labels = label app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name - app.Spec.Volumes[0].PersistentVolumeClaim.ReadOnly = true - err = createPVCAndApp("", f, pvc, app, deployTimeout) + wErr := writeDataInPod(app, f) + if wErr != nil { + e2elog.Failf("failed to write data with error %v", wErr) + } + + snap := getSnapshot(snapshotPath) + snap.Namespace = f.UniqueName + snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name + // create snapshot + for i := 0; i < totalCount; i++ { + go func(w *sync.WaitGroup, n int, s vs.VolumeSnapshot) { + s.Name = fmt.Sprintf("%s%d", f.UniqueName, n) + err = createSnapshot(&s, deployTimeout) + if err != nil { + e2elog.Failf("failed to create snapshot with error %v", err) + } + w.Done() + }(&wg, i, snap) + } + wg.Wait() + + pvcClone, err := loadPVC(pvcClonePath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load PVC with error %v", err) } - - opt := metav1.ListOptions{ - LabelSelector: fmt.Sprintf("app=%s", app.Name), + appClone, err := loadApp(appClonePath) + if err != nil { + e2elog.Failf("failed to load application with error %v", err) } + pvcClone.Namespace = f.UniqueName + appClone.Namespace = f.UniqueName + pvcClone.Spec.DataSource.Name = fmt.Sprintf("%s%d", f.UniqueName, 0) - filePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test" - _, stdErr := execCommandInPodAndAllowFail(f, fmt.Sprintf("echo 'Hello World' > %s", filePath), app.Namespace, &opt) - readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath) - if !strings.Contains(stdErr, readOnlyErr) { - Fail(stdErr) + // create multiple PVC from same snapshot + wg.Add(totalCount) + for i := 0; i < totalCount; 
i++ { + + go func(w *sync.WaitGroup, n int, p v1.PersistentVolumeClaim, a v1.Pod) { + name := fmt.Sprintf("%s%d", f.UniqueName, n) + err = createPVCAndApp(name, f, &p, &a, deployTimeout) + if err != nil { + e2elog.Failf("failed to create PVC and app with error %v", err) + } + w.Done() + }(&wg, i, *pvcClone, *appClone) } + wg.Wait() - // delete pvc and app + validateSubvolumeCount(f, totalSubvolumes, fileSystemName, subvolumegroup) + + wg.Add(totalCount) + // delete clone and app + for i := 0; i < totalCount; i++ { + go func(w *sync.WaitGroup, n int, p v1.PersistentVolumeClaim, a v1.Pod) { + name := fmt.Sprintf("%s%d", f.UniqueName, n) + p.Spec.DataSource.Name = name + err = deletePVCAndApp(name, f, &p, &a) + if err != nil { + e2elog.Failf("failed to delete PVC and app with error %v", err) + } + w.Done() + }(&wg, i, *pvcClone, *appClone) + } + wg.Wait() + + parentPVCCount := totalSubvolumes - totalCount + validateSubvolumeCount(f, parentPVCCount, fileSystemName, subvolumegroup) + // create clones from different snapshosts and bind it to an + // app + wg.Add(totalCount) + for i := 0; i < totalCount; i++ { + go func(w *sync.WaitGroup, n int, p v1.PersistentVolumeClaim, a v1.Pod) { + name := fmt.Sprintf("%s%d", f.UniqueName, n) + p.Spec.DataSource.Name = name + err = createPVCAndApp(name, f, &p, &a, deployTimeout) + if err != nil { + e2elog.Failf("failed to create PVC and app with error %v", err) + } + w.Done() + }(&wg, i, *pvcClone, *appClone) + } + wg.Wait() + + validateSubvolumeCount(f, totalSubvolumes, fileSystemName, subvolumegroup) + + wg.Add(totalCount) + // delete snapshot + for i := 0; i < totalCount; i++ { + go func(w *sync.WaitGroup, n int, s vs.VolumeSnapshot) { + s.Name = fmt.Sprintf("%s%d", f.UniqueName, n) + err = deleteSnapshot(&s, deployTimeout) + if err != nil { + e2elog.Failf("failed to delete snapshot with error %v", err) + } + w.Done() + }(&wg, i, snap) + } + wg.Wait() + + wg.Add(totalCount) + // delete clone and app + for i := 0; i < totalCount; i++ { + go func(w *sync.WaitGroup, n int, p v1.PersistentVolumeClaim, a v1.Pod) { + name := fmt.Sprintf("%s%d", f.UniqueName, n) + p.Spec.DataSource.Name = name + err = deletePVCAndApp(name, f, &p, &a) + if err != nil { + e2elog.Failf("failed to delete PVC and app with error %v", err) + } + w.Done() + }(&wg, i, *pvcClone, *appClone) + } + wg.Wait() + + validateSubvolumeCount(f, parentPVCCount, fileSystemName, subvolumegroup) + // delete parent pvc err = deletePVCAndApp("", f, pvc, app) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete PVC or application with error %v", err) } - }) - By("create a PVC clone and bind it to an app", func() { - v, err := f.ClientSet.Discovery().ServerVersion() - if err != nil { - e2elog.Logf("failed to get server version with error %v", err) - Fail(err.Error()) - } - // snapshot beta is only supported from v1.17+ - if v.Major > "1" || (v.Major == "1" && v.Minor >= "17") { - var wg sync.WaitGroup - totalCount := 3 - // totalSubvolumes represents the subvolumes in backend - // always totalCount+parentPVC - totalSubvolumes := totalCount + 1 - wg.Add(totalCount) - createCephFSSnapshotClass(f) - pvc, err := loadPVC(pvcPath) - if err != nil { - Fail(err.Error()) - } + validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup) + } + }) - pvc.Namespace = f.UniqueName - e2elog.Logf("The PVC template %+v", pvc) - err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) - if err != nil { - Fail(err.Error()) - } - - app, err := loadApp(appPath) - if err != nil { - Fail(err.Error()) - } - 
- app.Namespace = f.UniqueName - app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name - wErr := writeDataInPod(app, f) - if wErr != nil { - Fail(wErr.Error()) - } - - snap := getSnapshot(snapshotPath) - snap.Namespace = f.UniqueName - snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name - // create snapshot - for i := 0; i < totalCount; i++ { - go func(w *sync.WaitGroup, n int, s vs.VolumeSnapshot) { - s.Name = fmt.Sprintf("%s%d", f.UniqueName, n) - err = createSnapshot(&s, deployTimeout) - if err != nil { - e2elog.Logf("failed to create snapshot %v", err) - Fail(err.Error()) - } - w.Done() - }(&wg, i, snap) - } - wg.Wait() - - pvcClone, err := loadPVC(pvcClonePath) - if err != nil { - Fail(err.Error()) - } - appClone, err := loadApp(appClonePath) - if err != nil { - Fail(err.Error()) - } - pvcClone.Namespace = f.UniqueName - appClone.Namespace = f.UniqueName - pvcClone.Spec.DataSource.Name = fmt.Sprintf("%s%d", f.UniqueName, 0) - - // create multiple PVC from same snapshot - wg.Add(totalCount) - for i := 0; i < totalCount; i++ { - - go func(w *sync.WaitGroup, n int, p v1.PersistentVolumeClaim, a v1.Pod) { - name := fmt.Sprintf("%s%d", f.UniqueName, n) - err = createPVCAndApp(name, f, &p, &a, deployTimeout) - if err != nil { - e2elog.Logf("failed to create pvc and app %v", err) - Fail(err.Error()) - } - w.Done() - }(&wg, i, *pvcClone, *appClone) - } - wg.Wait() - - subVol := listCephFSSubVolumes(f, fileSystemName, subvolumegroup) - if len(subVol) != totalSubvolumes { - msg := fmt.Sprintf("subvolumes %v. subvolume count %d not matching expected count %v", subVol, len(subVol), totalSubvolumes) - e2elog.Logf(msg) - Fail(msg) - } - wg.Add(totalCount) - // delete clone and app - for i := 0; i < totalCount; i++ { - go func(w *sync.WaitGroup, n int, p v1.PersistentVolumeClaim, a v1.Pod) { - name := fmt.Sprintf("%s%d", f.UniqueName, n) - p.Spec.DataSource.Name = name - err = deletePVCAndApp(name, f, &p, &a) - if err != nil { - e2elog.Logf("failed to delete pvc and app %v", err) - Fail(err.Error()) - } - w.Done() - }(&wg, i, *pvcClone, *appClone) - } - wg.Wait() - - parentPVCCount := totalSubvolumes - totalCount - subVol = listCephFSSubVolumes(f, fileSystemName, subvolumegroup) - if len(subVol) != parentPVCCount { - msg := fmt.Sprintf("subvolumes %v. subvolume count %d not matching expected count %v", subVol, len(subVol), parentPVCCount) - e2elog.Logf(msg) - Fail(msg) - } - // create clones from different snapshosts and bind it to an - // app - wg.Add(totalCount) - for i := 0; i < totalCount; i++ { - go func(w *sync.WaitGroup, n int, p v1.PersistentVolumeClaim, a v1.Pod) { - name := fmt.Sprintf("%s%d", f.UniqueName, n) - p.Spec.DataSource.Name = name - err = createPVCAndApp(name, f, &p, &a, deployTimeout) - if err != nil { - e2elog.Logf("failed to create pvc and app %v", err) - Fail(err.Error()) - } - w.Done() - }(&wg, i, *pvcClone, *appClone) - } - wg.Wait() - - subVol = listCephFSSubVolumes(f, fileSystemName, subvolumegroup) - if len(subVol) != totalSubvolumes { - msg := fmt.Sprintf("subvolumes %v. 
subvolume count %d not matching expected count %v", subVol, len(subVol), totalSubvolumes) - e2elog.Logf(msg) - Fail(msg) - } - - wg.Add(totalCount) - // delete snapshot - for i := 0; i < totalCount; i++ { - go func(w *sync.WaitGroup, n int, s vs.VolumeSnapshot) { - s.Name = fmt.Sprintf("%s%d", f.UniqueName, n) - err = deleteSnapshot(&s, deployTimeout) - if err != nil { - e2elog.Logf("failed to delete snapshot %v", err) - Fail(err.Error()) - } - w.Done() - }(&wg, i, snap) - } - wg.Wait() - - wg.Add(totalCount) - // delete clone and app - for i := 0; i < totalCount; i++ { - go func(w *sync.WaitGroup, n int, p v1.PersistentVolumeClaim, a v1.Pod) { - name := fmt.Sprintf("%s%d", f.UniqueName, n) - p.Spec.DataSource.Name = name - err = deletePVCAndApp(name, f, &p, &a) - if err != nil { - e2elog.Logf("failed to delete pvc and app %v", err) - Fail(err.Error()) - } - w.Done() - }(&wg, i, *pvcClone, *appClone) - } - wg.Wait() - - subVol = listCephFSSubVolumes(f, fileSystemName, subvolumegroup) - if len(subVol) != parentPVCCount { - msg := fmt.Sprintf("subvolumes %v. subvolume count %d not matching expected count %v", subVol, len(subVol), parentPVCCount) - e2elog.Logf(msg) - Fail(msg) - } - // delete parent pvc - err = deletePVCAndApp("", f, pvc, app) - if err != nil { - Fail(err.Error()) - } - - subVol = listCephFSSubVolumes(f, fileSystemName, subvolumegroup) - if len(subVol) != 0 { - msg := fmt.Sprintf("subvolumes %v. subvolume count %d not matching expected count %v", subVol, len(subVol), 0) - e2elog.Logf(msg) - Fail(msg) - } - } - }) - - By("create a PVC-PVC clone and bind it to an app", func() { - v, err := f.ClientSet.Discovery().ServerVersion() - if err != nil { - e2elog.Logf("failed to get server version with error %v", err) - Fail(err.Error()) - } - // pvc clone is only supported from v1.16+ - if v.Major > "1" || (v.Major == "1" && v.Minor >= "16") { - var wg sync.WaitGroup - totalCount := 3 - // totalSubvolumes represents the subvolumes in backend - // always totalCount+parentPVC - totalSubvolumes := totalCount + 1 - pvc, err := loadPVC(pvcPath) - if err != nil { - Fail(err.Error()) - } - - pvc.Namespace = f.UniqueName - err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) - if err != nil { - Fail(err.Error()) - } - app, err := loadApp(appPath) - if err != nil { - Fail(err.Error()) - } - app.Namespace = f.UniqueName - app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name - wErr := writeDataInPod(app, f) - if wErr != nil { - Fail(wErr.Error()) - } - - pvcClone, err := loadPVC(pvcSmartClonePath) - if err != nil { - Fail(err.Error()) - } - pvcClone.Spec.DataSource.Name = pvc.Name - pvcClone.Namespace = f.UniqueName - appClone, err := loadApp(appSmartClonePath) - if err != nil { - Fail(err.Error()) - } - appClone.Namespace = f.UniqueName - wg.Add(totalCount) - // create clone and bind it to an app - for i := 0; i < totalCount; i++ { - go func(w *sync.WaitGroup, n int, p v1.PersistentVolumeClaim, a v1.Pod) { - name := fmt.Sprintf("%s%d", f.UniqueName, n) - err = createPVCAndApp(name, f, &p, &a, deployTimeout) - if err != nil { - Fail(err.Error()) - } - w.Done() - }(&wg, i, *pvcClone, *appClone) - } - wg.Wait() - - subVol := listCephFSSubVolumes(f, fileSystemName, subvolumegroup) - if len(subVol) != totalSubvolumes { - msg := fmt.Sprintf("subvolumes %v. 
subvolume count %d not matching expected count %v", subVol, len(subVol), totalSubvolumes) - e2elog.Logf(msg) - Fail(msg) - } - // delete parent pvc - err = deletePVCAndApp("", f, pvc, app) - if err != nil { - Fail(err.Error()) - } - - wg.Add(totalCount) - // delete clone and app - for i := 0; i < totalCount; i++ { - go func(w *sync.WaitGroup, n int, p v1.PersistentVolumeClaim, a v1.Pod) { - name := fmt.Sprintf("%s%d", f.UniqueName, n) - p.Spec.DataSource.Name = name - err = deletePVCAndApp(name, f, &p, &a) - if err != nil { - Fail(err.Error()) - } - w.Done() - }(&wg, i, *pvcClone, *appClone) - } - wg.Wait() - - subVol = listCephFSSubVolumes(f, fileSystemName, subvolumegroup) - if len(subVol) != 0 { - msg := fmt.Sprintf("subvolumes %v. subvolume count %d not matching expected count %v", subVol, len(subVol), 0) - e2elog.Logf(msg) - Fail(msg) - } - } - }) - - By("Create ROX PVC and Bind it to an app", func() { - // create pvc and bind it to an app + By("create a PVC-PVC clone and bind it to an app", func() { + v, err := f.ClientSet.Discovery().ServerVersion() + if err != nil { + e2elog.Failf("failed to get server version with error with error %v", err) + } + // pvc clone is only supported from v1.16+ + if v.Major > "1" || (v.Major == "1" && v.Minor >= "16") { + var wg sync.WaitGroup + totalCount := 3 + // totalSubvolumes represents the subvolumes in backend + // always totalCount+parentPVC + totalSubvolumes := totalCount + 1 pvc, err := loadPVC(pvcPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load PVC with error %v", err) } pvc.Namespace = f.UniqueName - pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany} + err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) + if err != nil { + e2elog.Failf("failed to create PVC with error %v", err) + } app, err := loadApp(appPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load application with error %v", err) } - app.Namespace = f.UniqueName - label := map[string]string{ - "app": app.Name, - } - app.Labels = label app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name - err = createPVCAndApp("", f, pvc, app, deployTimeout) + wErr := writeDataInPod(app, f) + if wErr != nil { + e2elog.Failf("failed to write data from application %v", wErr) + } + + pvcClone, err := loadPVC(pvcSmartClonePath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load PVC with error %v", err) } - - opt := metav1.ListOptions{ - LabelSelector: fmt.Sprintf("app=%s", app.Name), + pvcClone.Spec.DataSource.Name = pvc.Name + pvcClone.Namespace = f.UniqueName + appClone, err := loadApp(appSmartClonePath) + if err != nil { + e2elog.Failf("failed to load application with error %v", err) } - - filePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test" - _, stdErr := execCommandInPodAndAllowFail(f, fmt.Sprintf("echo 'Hello World' > %s", filePath), app.Namespace, &opt) - readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath) - if !strings.Contains(stdErr, readOnlyErr) { - Fail(stdErr) + appClone.Namespace = f.UniqueName + wg.Add(totalCount) + // create clone and bind it to an app + for i := 0; i < totalCount; i++ { + go func(w *sync.WaitGroup, n int, p v1.PersistentVolumeClaim, a v1.Pod) { + name := fmt.Sprintf("%s%d", f.UniqueName, n) + err = createPVCAndApp(name, f, &p, &a, deployTimeout) + if err != nil { + e2elog.Failf("failed to create PVC or application with error %v", err) + } + w.Done() + }(&wg, i, *pvcClone, *appClone) } + wg.Wait() - // delete pvc and app + 
validateSubvolumeCount(f, totalSubvolumes, fileSystemName, subvolumegroup) + + // delete parent pvc err = deletePVCAndApp("", f, pvc, app) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete PVC or application with error %v", err) } - }) - // Make sure this should be last testcase in this file, because - // it deletes pool - By("Create a PVC and Delete PVC when backend pool deleted", func() { - err := pvcDeleteWhenPoolNotFound(pvcPath, true, f) - if err != nil { - Fail(err.Error()) - } - }) + wg.Add(totalCount) + // delete clone and app + for i := 0; i < totalCount; i++ { + go func(w *sync.WaitGroup, n int, p v1.PersistentVolumeClaim, a v1.Pod) { + name := fmt.Sprintf("%s%d", f.UniqueName, n) + p.Spec.DataSource.Name = name + err = deletePVCAndApp(name, f, &p, &a) + if err != nil { + e2elog.Failf("failed to delete PVC or application with error %v", err) + } + w.Done() + }(&wg, i, *pvcClone, *appClone) + } + wg.Wait() + + validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup) + } + }) + + By("Create ROX PVC and bind it to an app", func() { + // create PVC and bind it to an app + pvc, err := loadPVC(pvcPath) + if err != nil { + e2elog.Failf("failed to load PVC with error %v", err) + } + + pvc.Namespace = f.UniqueName + pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany} + app, err := loadApp(appPath) + if err != nil { + e2elog.Failf("failed to load application with error %v", err) + } + + app.Namespace = f.UniqueName + label := map[string]string{ + "app": app.Name, + } + app.Labels = label + app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name + err = createPVCAndApp("", f, pvc, app, deployTimeout) + if err != nil { + e2elog.Failf("failed to create PVC or application with error %v", err) + } + + opt := metav1.ListOptions{ + LabelSelector: fmt.Sprintf("app=%s", app.Name), + } + + filePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test" + _, stdErr := execCommandInPodAndAllowFail(f, fmt.Sprintf("echo 'Hello World' > %s", filePath), app.Namespace, &opt) + readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath) + if !strings.Contains(stdErr, readOnlyErr) { + e2elog.Failf(stdErr) + } + + // delete PVC and app + err = deletePVCAndApp("", f, pvc, app) + if err != nil { + e2elog.Failf("failed to delete PVC or application with error %v", err) + } + }) + // Make sure this should be last testcase in this file, because + // it deletes pool + By("Create a PVC and delete PVC when backend pool deleted", func() { + err := pvcDeleteWhenPoolNotFound(pvcPath, true, f) + if err != nil { + e2elog.Failf("failed to delete PVC with error %v", err) + } }) }) diff --git a/e2e/cephfs_helper.go b/e2e/cephfs_helper.go new file mode 100644 index 000000000..4b261a0ea --- /dev/null +++ b/e2e/cephfs_helper.go @@ -0,0 +1,131 @@ +package e2e + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" +) + +const ( + adminUser = "admin" +) + +// validateSubvolumegroup validates whether subvolumegroup is present. 
+func validateSubvolumegroup(f *framework.Framework, subvolgrp string) error { + cmd := fmt.Sprintf("ceph fs subvolumegroup getpath myfs %s", subvolgrp) + stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace) + if err != nil { + return err + } + if stdErr != "" { + return fmt.Errorf("failed to getpath for subvolumegroup %s with error %v", subvolgrp, stdErr) + } + expectedGrpPath := "/volumes/" + subvolgrp + stdOut = strings.TrimSpace(stdOut) + if stdOut != expectedGrpPath { + return fmt.Errorf("error unexpected group path. Found: %s", stdOut) + } + return nil +} + +func createCephfsStorageClass(c kubernetes.Interface, f *framework.Framework, enablePool bool, clusterID string) error { + scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "storageclass.yaml") + sc, err := getStorageClass(scPath) + if err != nil { + return err + } + sc.Parameters["fsName"] = "myfs" + sc.Parameters["csi.storage.k8s.io/provisioner-secret-namespace"] = rookNamespace + sc.Parameters["csi.storage.k8s.io/provisioner-secret-name"] = cephfsProvisionerSecretName + + sc.Parameters["csi.storage.k8s.io/controller-expand-secret-namespace"] = rookNamespace + sc.Parameters["csi.storage.k8s.io/controller-expand-secret-name"] = cephfsProvisionerSecretName + + sc.Parameters["csi.storage.k8s.io/node-stage-secret-namespace"] = rookNamespace + sc.Parameters["csi.storage.k8s.io/node-stage-secret-name"] = cephfsNodePluginSecretName + + if enablePool { + sc.Parameters["pool"] = "myfs-data0" + } + fsID, stdErr, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace) + if err != nil { + return err + } + if stdErr != "" { + return fmt.Errorf("error getting fsid %v", stdErr) + } + // remove new line present in fsID + fsID = strings.Trim(fsID, "\n") + if clusterID != "" { + fsID = clusterID + } + sc.Namespace = cephCSINamespace + sc.Parameters["clusterID"] = fsID + _, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{}) + return err +} + +func createCephfsSecret(c kubernetes.Interface, f *framework.Framework) error { + scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "secret.yaml") + sc, err := getSecret(scPath) + if err != nil { + return err + } + adminKey, stdErr, err := execCommandInToolBoxPod(f, "ceph auth get-key client.admin", rookNamespace) + if err != nil { + return err + } + if stdErr != "" { + return fmt.Errorf("error getting admin key %v", stdErr) + } + sc.StringData["adminID"] = adminUser + sc.StringData["adminKey"] = adminKey + delete(sc.StringData, "userID") + delete(sc.StringData, "userKey") + sc.Namespace = cephCSINamespace + _, err = c.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &sc, metav1.CreateOptions{}) + return err +} + +func deleteBackingCephFSVolume(f *framework.Framework, pvc *v1.PersistentVolumeClaim) error { + imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f) + if err != nil { + return err + } + + _, stdErr, err := execCommandInToolBoxPod(f, "ceph fs subvolume rm myfs "+imageData.imageName+" "+subvolumegroup, rookNamespace) + if err != nil { + return err + } + if stdErr != "" { + return fmt.Errorf("error deleting backing volume %s %v", imageData.imageName, stdErr) + } + return nil +} + +type cephfsSubVolume struct { + Name string `json:"name"` +} + +func listCephFSSubVolumes(f *framework.Framework, filesystem, groupname string) ([]cephfsSubVolume, error) { + var subVols []cephfsSubVolume + stdout, stdErr, err := execCommandInToolBoxPod(f, fmt.Sprintf("ceph fs subvolume ls %s --group_name=%s --format=json", filesystem, 
groupname), rookNamespace)
+	if err != nil {
+		return subVols, err
+	}
+	if stdErr != "" {
+		return subVols, fmt.Errorf("error listing subvolumes %v", stdErr)
+	}
+
+	err = json.Unmarshal([]byte(stdout), &subVols)
+	if err != nil {
+		return subVols, err
+	}
+	return subVols, nil
+}
diff --git a/e2e/configmap.go b/e2e/configmap.go
new file mode 100644
index 000000000..60b593561
--- /dev/null
+++ b/e2e/configmap.go
@@ -0,0 +1,120 @@
+package e2e
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/ceph/ceph-csi/internal/util"
+
+	v1 "k8s.io/api/core/v1"
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/test/e2e/framework"
+)
+
+func deleteConfigMap(pluginPath string) error {
+	path := pluginPath + configMap
+	_, err := framework.RunKubectl(cephCSINamespace, "delete", "-f", path, ns)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+func createConfigMap(pluginPath string, c kubernetes.Interface, f *framework.Framework) error {
+	path := pluginPath + configMap
+	cm := v1.ConfigMap{}
+	err := unmarshal(path, &cm)
+	if err != nil {
+		return err
+	}
+
+	fsID, stdErr, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
+	if err != nil {
+		return err
+	}
+	if stdErr != "" {
+		return fmt.Errorf("error getting fsid %v", stdErr)
+	}
+	// remove new line present in fsID
+	fsID = strings.Trim(fsID, "\n")
+	// get mon list
+	mons, err := getMons(rookNamespace, c)
+	if err != nil {
+		return err
+	}
+	conmap := []util.ClusterInfo{{
+		ClusterID:      fsID,
+		Monitors:       mons,
+		RadosNamespace: radosNamespace,
+	}}
+	if upgradeTesting {
+		subvolumegroup = "csi"
+	}
+	conmap[0].CephFS.SubvolumeGroup = subvolumegroup
+	data, err := json.Marshal(conmap)
+	if err != nil {
+		return err
+	}
+	cm.Data["config.json"] = string(data)
+	cm.Namespace = cephCSINamespace
+	// if the configmap is already present, update it; during cephcsi helm chart
+	// deployment an empty configmap gets created and we need to override it
+	_, err = c.CoreV1().ConfigMaps(cephCSINamespace).Get(context.TODO(), cm.Name, metav1.GetOptions{})
+
+	if err == nil {
+		_, updateErr := c.CoreV1().ConfigMaps(cephCSINamespace).Update(context.TODO(), &cm, metav1.UpdateOptions{})
+		if updateErr != nil {
+			return updateErr
+		}
+	}
+	if apierrs.IsNotFound(err) {
+		_, err = c.CoreV1().ConfigMaps(cephCSINamespace).Create(context.TODO(), &cm, metav1.CreateOptions{})
+	}
+
+	return err
+}
+
+// createCustomConfigMap provides information of multiple clusters.
+func createCustomConfigMap(c kubernetes.Interface, pluginPath string, subvolgrpInfo map[string]string) error { + path := pluginPath + configMap + cm := v1.ConfigMap{} + err := unmarshal(path, &cm) + if err != nil { + return err + } + // get mon list + mons, err := getMons(rookNamespace, c) + if err != nil { + return err + } + // get clusterIDs + var clusterID []string + for key := range subvolgrpInfo { + clusterID = append(clusterID, key) + } + conmap := []util.ClusterInfo{ + { + ClusterID: clusterID[0], + Monitors: mons, + }, + { + ClusterID: clusterID[1], + Monitors: mons, + }} + for i := 0; i < len(subvolgrpInfo); i++ { + conmap[i].CephFS.SubvolumeGroup = subvolgrpInfo[clusterID[i]] + } + data, err := json.Marshal(conmap) + if err != nil { + return err + } + cm.Data["config.json"] = string(data) + cm.Namespace = cephCSINamespace + // since a configmap is already created, update the existing configmap + _, err = c.CoreV1().ConfigMaps(cephCSINamespace).Update(context.TODO(), &cm, metav1.UpdateOptions{}) + return err +} diff --git a/e2e/namespace.go b/e2e/namespace.go new file mode 100644 index 000000000..bbe59e8d9 --- /dev/null +++ b/e2e/namespace.go @@ -0,0 +1,75 @@ +package e2e + +import ( + "context" + "fmt" + "io/ioutil" + "strings" + "time" + + v1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" + testutils "k8s.io/kubernetes/test/utils" +) + +func createNamespace(c kubernetes.Interface, name string) error { + timeout := time.Duration(deployTimeout) * time.Minute + ns := &v1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + _, err := c.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) + if err != nil && !apierrs.IsAlreadyExists(err) { + return err + } + + return wait.PollImmediate(poll, timeout, func() (bool, error) { + _, err := c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + e2elog.Logf("Error getting namespace: '%s': %v", name, err) + if apierrs.IsNotFound(err) { + return false, nil + } + if testutils.IsRetryableAPIError(err) { + return false, nil + } + return false, err + } + return true, nil + }) +} + +func deleteNamespace(c kubernetes.Interface, name string) error { + timeout := time.Duration(deployTimeout) * time.Minute + err := c.CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{}) + if err != nil && !apierrs.IsNotFound(err) { + return err + } + return wait.PollImmediate(poll, timeout, func() (bool, error) { + _, err = c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + if apierrs.IsNotFound(err) { + return true, nil + } + e2elog.Logf("Error getting namespace: '%s': %v", name, err) + if testutils.IsRetryableAPIError(err) { + return false, nil + } + return false, err + } + return false, nil + }) +} + +func replaceNamespaceInTemplate(filePath string) (string, error) { + read, err := ioutil.ReadFile(filePath) + if err != nil { + return "", err + } + return strings.ReplaceAll(string(read), "namespace: default", fmt.Sprintf("namespace: %s", cephCSINamespace)), nil +} diff --git a/e2e/node.go b/e2e/node.go new file mode 100644 index 000000000..9a4546af2 --- /dev/null +++ b/e2e/node.go @@ -0,0 +1,44 @@ +package e2e + +import ( + "context" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + 
"k8s.io/kubernetes/test/e2e/framework" +) + +func createNodeLabel(f *framework.Framework, labelKey, labelValue string) error { + // NOTE: This makes all nodes (in a multi-node setup) in the test take + // the same label values, which is fine for the test + nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + for i := range nodes.Items { + framework.AddOrUpdateLabelOnNode(f.ClientSet, nodes.Items[i].Name, labelKey, labelValue) + } + return nil +} + +func deleteNodeLabel(c kubernetes.Interface, labelKey string) error { + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + for i := range nodes.Items { + framework.RemoveLabelOffNode(c, nodes.Items[i].Name, labelKey) + } + return nil +} + +func checkNodeHasLabel(c kubernetes.Interface, labelKey, labelValue string) error { + nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{}) + if err != nil { + return err + } + for i := range nodes.Items { + framework.ExpectNodeHasLabel(c, nodes.Items[i].Name, labelKey, labelValue) + } + return nil +} diff --git a/e2e/pod.go b/e2e/pod.go new file mode 100644 index 000000000..c4a7e4b38 --- /dev/null +++ b/e2e/pod.go @@ -0,0 +1,212 @@ +package e2e + +import ( + "context" + "errors" + "fmt" + "strings" + "time" + + appsv1 "k8s.io/api/apps/v1" + v1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/pkg/client/conditions" + "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" + testutils "k8s.io/kubernetes/test/utils" +) + +func waitForDaemonSets(name, ns string, c kubernetes.Interface, t int) error { + timeout := time.Duration(t) * time.Minute + start := time.Now() + e2elog.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", timeout, ns) + + return wait.PollImmediate(poll, timeout, func() (bool, error) { + ds, err := c.AppsV1().DaemonSets(ns).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + e2elog.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err) + if strings.Contains(err.Error(), "not found") { + return false, nil + } + if testutils.IsRetryableAPIError(err) { + return false, nil + } + return false, err + } + dNum := ds.Status.DesiredNumberScheduled + ready := ds.Status.NumberReady + e2elog.Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ready, dNum, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds())) + if ready != dNum { + return false, nil + } + + return true, nil + }) +} + +// Waits for the deployment to complete. 
+
+func waitForDeploymentComplete(name, ns string, c kubernetes.Interface, t int) error {
+	var (
+		deployment *appsv1.Deployment
+		reason     string
+		err        error
+	)
+	timeout := time.Duration(t) * time.Minute
+	err = wait.PollImmediate(poll, timeout, func() (bool, error) {
+		deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
+		if err != nil {
+			return false, err
+		}
+
+		// TODO need to check rolling update
+
+		// When the deployment status and its underlying resources reach the
+		// desired state, we're done
+		if deployment.Status.Replicas == deployment.Status.ReadyReplicas {
+			return true, nil
+		}
+		e2elog.Logf("deployment status: expected replica count %d running replica count %d", deployment.Status.Replicas, deployment.Status.ReadyReplicas)
+		reason = fmt.Sprintf("deployment status: %#v", deployment.Status.String())
+		return false, nil
+	})
+
+	if errors.Is(err, wait.ErrWaitTimeout) {
+		err = fmt.Errorf("%s", reason)
+	}
+	if err != nil {
+		return fmt.Errorf("error waiting for deployment %q status to match expectation: %w", name, err)
+	}
+	return nil
+}
+
+func getCommandInPodOpts(f *framework.Framework, c, ns string, opt *metav1.ListOptions) (framework.ExecOptions, error) {
+	cmd := []string{"/bin/sh", "-c", c}
+	podList, err := f.PodClientNS(ns).List(context.TODO(), *opt)
+	if err != nil {
+		return framework.ExecOptions{}, err
+	}
+	if len(podList.Items) == 0 {
+		return framework.ExecOptions{}, errors.New("podlist is empty")
+	}
+	// the command is executed in the first pod matching the given options
+	return framework.ExecOptions{
+		Command:            cmd,
+		PodName:            podList.Items[0].Name,
+		Namespace:          ns,
+		ContainerName:      podList.Items[0].Spec.Containers[0].Name,
+		Stdin:              nil,
+		CaptureStdout:      true,
+		CaptureStderr:      true,
+		PreserveWhitespace: true,
+	}, nil
+}
+
+func execCommandInPod(f *framework.Framework, c, ns string, opt *metav1.ListOptions) (string, string, error) {
+	podOpt, err := getCommandInPodOpts(f, c, ns, opt)
+	if err != nil {
+		return "", "", err
+	}
+	stdOut, stdErr, err := f.ExecWithOptions(podOpt)
+	if stdErr != "" {
+		e2elog.Logf("stdErr occurred: %v", stdErr)
+	}
+	return stdOut, stdErr, err
+}
+
+func execCommandInToolBoxPod(f *framework.Framework, c, ns string) (string, string, error) {
+	opt := &metav1.ListOptions{
+		LabelSelector: rookTolBoxPodLabel,
+	}
+	podOpt, err := getCommandInPodOpts(f, c, ns, opt)
+	if err != nil {
+		return "", "", err
+	}
+	stdOut, stdErr, err := f.ExecWithOptions(podOpt)
+	if stdErr != "" {
+		e2elog.Logf("stdErr occurred: %v", stdErr)
+	}
+	return stdOut, stdErr, err
+}
+
+func execCommandInPodAndAllowFail(f *framework.Framework, c, ns string, opt *metav1.ListOptions) (string, string) {
+	podOpt, err := getCommandInPodOpts(f, c, ns, opt)
+	if err != nil {
+		return "", err.Error()
+	}
+	stdOut, stdErr, err := f.ExecWithOptions(podOpt)
+	if err != nil {
+		e2elog.Logf("command %s failed: %v", c, err)
+	}
+	return stdOut, stdErr
+}
+
+func loadApp(path string) (*v1.Pod, error) {
+	app := v1.Pod{}
+	err := unmarshal(path, &app)
+	if err != nil {
+		return nil, err
+	}
+	return &app, nil
+}
+
+func createApp(c kubernetes.Interface, app *v1.Pod, timeout int) error {
+	_, err := c.CoreV1().Pods(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{})
+	if err != nil {
+		return err
+	}
+	return waitForPodInRunningState(app.Name, app.Namespace, c, timeout)
+}
+
+func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int) error {
+	timeout := time.Duration(t) * time.Minute
+	start := time.Now()
+	e2elog.Logf("Waiting up to %v to 
be in Running state", name) + return wait.PollImmediate(poll, timeout, func() (bool, error) { + pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return false, err + } + switch pod.Status.Phase { + case v1.PodRunning: + return true, nil + case v1.PodFailed, v1.PodSucceeded: + return false, conditions.ErrPodCompleted + } + e2elog.Logf("%s app is in %s phase expected to be in Running state (%d seconds elapsed)", name, pod.Status.Phase, int(time.Since(start).Seconds())) + return false, nil + }) +} + +func deletePod(name, ns string, c kubernetes.Interface, t int) error { + timeout := time.Duration(t) * time.Minute + err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) + if err != nil { + return err + } + start := time.Now() + e2elog.Logf("Waiting for pod %v to be deleted", name) + return wait.PollImmediate(poll, timeout, func() (bool, error) { + _, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{}) + + if apierrs.IsNotFound(err) { + return true, nil + } + e2elog.Logf("%s app to be deleted (%d seconds elapsed)", name, int(time.Since(start).Seconds())) + if err != nil { + return false, err + } + return false, nil + }) +} + +func deletePodWithLabel(label, ns string, skipNotFound bool) error { + _, err := framework.RunKubectl(cephCSINamespace, "delete", "po", "-l", label, fmt.Sprintf("--ignore-not-found=%t", skipNotFound), fmt.Sprintf("--namespace=%s", ns)) + if err != nil { + e2elog.Logf("failed to delete pod %v", err) + } + return err +} diff --git a/e2e/pvc.go b/e2e/pvc.go new file mode 100644 index 000000000..5341b776d --- /dev/null +++ b/e2e/pvc.go @@ -0,0 +1,171 @@ +package e2e + +import ( + "context" + "errors" + "fmt" + "time" + + v1 "k8s.io/api/core/v1" + apierrs "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" + e2epv "k8s.io/kubernetes/test/e2e/framework/pv" + testutils "k8s.io/kubernetes/test/utils" +) + +func loadPVC(path string) (*v1.PersistentVolumeClaim, error) { + pvc := &v1.PersistentVolumeClaim{} + err := unmarshal(path, &pvc) + if err != nil { + return nil, err + } + return pvc, err +} + +func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, t int) error { + timeout := time.Duration(t) * time.Minute + pv := &v1.PersistentVolume{} + var err error + _, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) + if err != nil { + return err + } + if timeout == 0 { + return nil + } + name := pvc.Name + start := time.Now() + e2elog.Logf("Waiting up to %v to be in Bound state", pvc) + + return wait.PollImmediate(poll, timeout, func() (bool, error) { + e2elog.Logf("waiting for PVC %s (%d seconds elapsed)", pvc.Name, int(time.Since(start).Seconds())) + pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + e2elog.Logf("Error getting pvc in namespace: '%s': %v", pvc.Namespace, err) + if testutils.IsRetryableAPIError(err) { + return false, nil + } + if apierrs.IsNotFound(err) { + return false, nil + } + return false, err + } + + if pvc.Spec.VolumeName == "" { + return false, nil + } + + pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) + if err != nil { + return false, err + } + if 
apierrs.IsNotFound(err) { + return false, nil + } + return false, err + } + err = e2epv.WaitOnPVandPVC(c, pvc.Namespace, pv, pvc) + if err != nil { + return false, nil + } + return true, nil + }) +} + +func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, t int) error { + timeout := time.Duration(t) * time.Minute + nameSpace := pvc.Namespace + name := pvc.Name + var err error + e2elog.Logf("Deleting PersistentVolumeClaim %v in namespace %v", name, nameSpace) + + pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return err + } + pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) + if err != nil { + return err + } + + err = c.CoreV1().PersistentVolumeClaims(nameSpace).Delete(context.TODO(), name, metav1.DeleteOptions{}) + if err != nil { + return fmt.Errorf("delete of PVC %v failed: %w", name, err) + } + start := time.Now() + return wait.PollImmediate(poll, timeout, func() (bool, error) { + // Check that the PVC is really deleted. + e2elog.Logf("waiting for PVC %s in state %s to be deleted (%d seconds elapsed)", name, pvc.Status.String(), int(time.Since(start).Seconds())) + pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + return false, nil + } + if !apierrs.IsNotFound(err) { + return false, fmt.Errorf("get on deleted PVC %v failed with error other than \"not found\": %w", name, err) + } + + // Check that the PV bound to the PVC is deleted as well. + _, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) + if err == nil { + return false, nil + } + + if !apierrs.IsNotFound(err) { + return false, fmt.Errorf("get on deleted PV %v failed with error other than \"not found\": %w", pv.Name, err) + } + + return true, nil + }) +}
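A usage sketch for the PVC helpers above, following the pattern the RBD and CephFS suites use (illustrative only, not part of the diff); pvcPath and deployTimeout are package-level values, and f is the Ginkgo framework handle:

	pvc, err := loadPVC(pvcPath)
	if err != nil {
		e2elog.Failf("failed to load PVC with error %v", err)
	}
	pvc.Namespace = f.UniqueName
	// create the PVC and wait for it to be Bound to a PV
	err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
	if err != nil {
		e2elog.Failf("failed to create PVC with error %v", err)
	}
	// delete the PVC and wait until its PV is removed as well
	err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
	if err != nil {
		e2elog.Failf("failed to delete PVC with error %v", err)
	}

+ +// getBoundPV returns the bound PV details. 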
+func getBoundPV(client kubernetes.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) { + // Get new copy of the claim + claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + + // Get the bound PV + pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{}) + return pv, err +} + +func checkPVSelectorValuesForPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim) error { + pv, err := getBoundPV(f.ClientSet, pvc) + if err != nil { + return err + } + + if len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) == 0 { + return errors.New("found empty NodeSelectorTerms in PV") + } + + rFound := false + zFound := false + for _, expression := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions { + switch expression.Key { + case nodeCSIRegionLabel: + if rFound { + return errors.New("found multiple occurrences of topology key for region") + } + rFound = true + if expression.Values[0] != regionValue { + return errors.New("topology value for region label mismatch") + } + case nodeCSIZoneLabel: + if zFound { + return errors.New("found multiple occurrences of topology key for zone") + } + zFound = true + if expression.Values[0] != zoneValue { + return errors.New("topology value for zone label mismatch") + } + default: + return errors.New("unexpected key in node selector terms found in PV") + } + } + return nil +} diff --git a/e2e/rbd.go b/e2e/rbd.go index 0355c9fd5..2c209399c 100644 --- a/e2e/rbd.go +++ b/e2e/rbd.go @@ -42,20 +42,20 @@ func deployRBDPlugin() { // delete objects deployed by rook data, err := replaceNamespaceInTemplate(rbdDirPath + rbdProvisionerRBAC) if err != nil { - e2elog.Logf("failed to read content from %s %v", rbdDirPath+rbdProvisionerRBAC, err) + e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdProvisionerRBAC, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, "--ignore-not-found=true", ns, "delete", "-f", "-") if err != nil { - e2elog.Logf("failed to delete provisioner rbac %s %v", rbdDirPath+rbdProvisionerRBAC, err) + e2elog.Failf("failed to delete provisioner rbac %s with error %v", rbdDirPath+rbdProvisionerRBAC, err) } data, err = replaceNamespaceInTemplate(rbdDirPath + rbdNodePluginRBAC) if err != nil { - e2elog.Logf("failed to read content from %s %v", rbdDirPath+rbdNodePluginRBAC, err) + e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdNodePluginRBAC, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, "delete", "--ignore-not-found=true", ns, "-f", "-") if err != nil { - e2elog.Logf("failed to delete nodeplugin rbac %s %v", rbdDirPath+rbdNodePluginRBAC, err) + e2elog.Failf("failed to delete nodeplugin rbac %s with error %v", rbdDirPath+rbdNodePluginRBAC, err) } createORDeleteRbdResouces("create") @@ -68,60 +68,69 @@ func deleteRBDPlugin() { func createORDeleteRbdResouces(action string) { data, err := replaceNamespaceInTemplate(rbdDirPath + rbdProvisioner) if err != nil { - e2elog.Logf("failed to read content from %s %v", rbdDirPath+rbdProvisioner, err) + e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdProvisioner, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-") if err != nil { - e2elog.Logf("failed to %s rbd provisioner %v", action, err) + e2elog.Failf("failed to %s rbd provisioner with error %v", action, err) } data, err = 
replaceNamespaceInTemplate(rbdDirPath + rbdProvisionerRBAC) if err != nil { - e2elog.Logf("failed to read content from %s %v", rbdDirPath+rbdProvisionerRBAC, err) + e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdProvisionerRBAC, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-") if err != nil { - e2elog.Logf("failed to %s provisioner rbac %v", action, err) + e2elog.Failf("failed to %s provisioner rbac with error %v", action, err) } data, err = replaceNamespaceInTemplate(rbdDirPath + rbdProvisionerPSP) if err != nil { - e2elog.Logf("failed to read content from %s %v", rbdDirPath+rbdProvisionerPSP, err) + e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdProvisionerPSP, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, action, "-f", "-") if err != nil { - e2elog.Logf("failed to %s provisioner psp %v", action, err) + e2elog.Failf("failed to %s provisioner psp with error %v", action, err) } data, err = replaceNamespaceInTemplate(rbdDirPath + rbdNodePlugin) if err != nil { - e2elog.Logf("failed to read content from %s %v", rbdDirPath+rbdNodePlugin, err) + e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdNodePlugin, err) } domainLabel := nodeRegionLabel + "," + nodeZoneLabel data = addTopologyDomainsToDSYaml(data, domainLabel) _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-") if err != nil { - e2elog.Logf("failed to %s nodeplugin %v", action, err) - Fail(err.Error()) + e2elog.Failf("failed to %s nodeplugin with error %v", action, err) } data, err = replaceNamespaceInTemplate(rbdDirPath + rbdNodePluginRBAC) if err != nil { - e2elog.Logf("failed to read content from %s %v", rbdDirPath+rbdNodePluginRBAC, err) + e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdNodePluginRBAC, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-") if err != nil { - e2elog.Logf("failed to %s nodeplugin rbac %v", action, err) + e2elog.Failf("failed to %s nodeplugin rbac with error %v", action, err) } data, err = replaceNamespaceInTemplate(rbdDirPath + rbdNodePluginPSP) if err != nil { - e2elog.Logf("failed to read content from %s %v", rbdDirPath+rbdNodePluginPSP, err) + e2elog.Failf("failed to read content from %s with error %v", rbdDirPath+rbdNodePluginPSP, err) } _, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-") if err != nil { - e2elog.Logf("failed to %s nodeplugin psp %v", action, err) + e2elog.Failf("failed to %s nodeplugin psp with error %v", action, err) + } +} + +func validateRBDImageCount(f *framework.Framework, count int) { + imageList, err := listRBDImages(f) + if err != nil { + e2elog.Failf("failed to list rbd images with error %v", err) + } + if len(imageList) != count { + e2elog.Failf("backend images not matching kubernetes resource count,image count %d kubernetes resource count %d", len(imageList), count) } } @@ -135,19 +144,34 @@ var _ = Describe("RBD", func() { } c = f.ClientSet if deployRBD { - createNodeLabel(f, nodeRegionLabel, regionValue) - createNodeLabel(f, nodeZoneLabel, zoneValue) + err := createNodeLabel(f, nodeRegionLabel, regionValue) + if err != nil { + e2elog.Failf("failed to create node label with error %v", err) + } + err = createNodeLabel(f, nodeZoneLabel, zoneValue) + if err != nil { + e2elog.Failf("failed to create node label with error %v", err) + } if cephCSINamespace != defaultNs { - err := createNamespace(c, cephCSINamespace) + 
err = createNamespace(c, cephCSINamespace) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create namespace with error %v", err) } } deployRBDPlugin() } - createConfigMap(rbdDirPath, f.ClientSet, f) - createRBDStorageClass(f.ClientSet, f, nil, nil) - createRBDSecret(f.ClientSet, f) + err := createConfigMap(rbdDirPath, f.ClientSet, f) + if err != nil { + e2elog.Failf("failed to create configmap with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, nil, nil) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } + err = createRBDSecret(f.ClientSet, f) + if err != nil { + e2elog.Failf("failed to create secret with error %v", err) + } deployVault(f.ClientSet, deployTimeout) }) @@ -164,25 +188,46 @@ var _ = Describe("RBD", func() { logsCSIPods("app=csi-rbdplugin", c) } - deleteConfigMap(rbdDirPath) - deleteResource(rbdExamplePath + "secret.yaml") - deleteResource(rbdExamplePath + "storageclass.yaml") + err := deleteConfigMap(rbdDirPath) + if err != nil { + e2elog.Failf("failed to delete configmap with error %v", err) + } + err = deleteResource(rbdExamplePath + "secret.yaml") + if err != nil { + e2elog.Failf("failed to delete secret with error %v", err) + } + err = deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } // deleteResource(rbdExamplePath + "snapshotclass.yaml") deleteVault() if deployRBD { deleteRBDPlugin() if cephCSINamespace != defaultNs { - err := deleteNamespace(c, cephCSINamespace) + err = deleteNamespace(c, cephCSINamespace) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete namespace with error %v", err) } } } - deleteNodeLabel(c, nodeRegionLabel) - deleteNodeLabel(c, nodeZoneLabel) + err = deleteNodeLabel(c, nodeRegionLabel) + if err != nil { + e2elog.Failf("failed to delete node label with error %v", err) + } + err = deleteNodeLabel(c, nodeZoneLabel) + if err != nil { + e2elog.Failf("failed to delete node label with error %v", err) + } // Remove the CSI labels that get added - deleteNodeLabel(c, nodeCSIRegionLabel) - deleteNodeLabel(c, nodeCSIZoneLabel) + err = deleteNodeLabel(c, nodeCSIRegionLabel) + if err != nil { + e2elog.Failf("failed to delete node label with error %v", err) + } + err = deleteNodeLabel(c, nodeCSIZoneLabel) + if err != nil { + e2elog.Failf("failed to delete node label with error %v", err) + } }) Context("Test RBD CSI", func() { @@ -200,112 +245,139 @@ var _ = Describe("RBD", func() { By("checking provisioner deployment is running", func() { err := waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("timeout waiting for deployment %s with error %v", rbdDeploymentName, err) } }) - By("checking nodeplugin deamonsets is running", func() { + By("checking nodeplugin daemonset pods are running", func() { err := waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("timeout waiting for daemonset %s with error %v", rbdDaemonsetName, err) } }) - By("create a PVC and Bind it to an app", func() { - validatePVCAndAppBinding(pvcPath, appPath, f) + By("create a PVC and bind it to an app", func() { + err := validatePVCAndAppBinding(pvcPath, appPath, f) + if err != nil { + e2elog.Failf("failed to validate pvc and application binding with error %v", err) + } // validate created backend rbd images - images := listRBDImages(f) - if 
len(images) != 0 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 0) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 0) }) - By("create a PVC and Bind it to an app with normal user", func() { - validateNormalUserPVCAccess(pvcPath, f) + By("create a PVC and bind it to an app with normal user", func() { + err := validateNormalUserPVCAccess(pvcPath, f) + if err != nil { + e2elog.Failf("failed to validate normal user pvc and application binding with error %v", err) + } // validate created backend rbd images - images := listRBDImages(f) - if len(images) != 0 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 0) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 0) }) - By("create a PVC and Bind it to an app with ext4 as the FS ", func() { - deleteResource(rbdExamplePath + "storageclass.yaml") - createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"csi.storage.k8s.io/fstype": "ext4"}) - validatePVCAndAppBinding(pvcPath, appPath, f) + By("create a PVC and bind it to an app with ext4 as the FS", func() { + err := deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"csi.storage.k8s.io/fstype": "ext4"}) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } + err = validatePVCAndAppBinding(pvcPath, appPath, f) + if err != nil { + e2elog.Failf("failed to validate pvc and application binding with error %v", err) + } // validate created backend rbd images - images := listRBDImages(f) - if len(images) != 0 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 0) - Fail("validate backend image failed") + validateRBDImageCount(f, 0) + err = deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, nil, nil) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) } - deleteResource(rbdExamplePath + "storageclass.yaml") - createRBDStorageClass(f.ClientSet, f, nil, nil) }) - By("create a PVC and Bind it to an app with encrypted RBD volume", func() { - deleteResource(rbdExamplePath + "storageclass.yaml") - createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"encrypted": "true"}) - validateEncryptedPVCAndAppBinding(pvcPath, appPath, "", f) + By("create a PVC and bind it to an app with encrypted RBD volume", func() { + err := deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"encrypted": "true"}) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } + err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, "", f) + if err != nil { + e2elog.Failf("failed to validate encrypted pvc with error %v", err) + } // validate created backend rbd images - images := listRBDImages(f) - if len(images) != 0 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 0) - Fail("validate backend image failed") + validateRBDImageCount(f, 0) + err = deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, 
f, nil, nil) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) } - deleteResource(rbdExamplePath + "storageclass.yaml") - createRBDStorageClass(f.ClientSet, f, nil, nil) }) - By("create a PVC and Bind it to an app with encrypted RBD volume with Vault KMS", func() { - deleteResource(rbdExamplePath + "storageclass.yaml") + By("create a PVC and bind it to an app with encrypted RBD volume with Vault KMS", func() { + err := deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } scOpts := map[string]string{ "encrypted": "true", "encryptionKMSID": "vault-test", } - createRBDStorageClass(f.ClientSet, f, nil, scOpts) - validateEncryptedPVCAndAppBinding(pvcPath, appPath, "vault", f) - // validate created backend rbd images - images := listRBDImages(f) - if len(images) != 0 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 0) - Fail("validate backend image failed") + err = createRBDStorageClass(f.ClientSet, f, nil, scOpts) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } + err = validateEncryptedPVCAndAppBinding(pvcPath, appPath, "vault", f) + if err != nil { + e2elog.Failf("failed to validate encrypted pvc with error %v", err) + } + // validate created backend rbd images + validateRBDImageCount(f, 0) + err = deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, nil, nil) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) } - deleteResource(rbdExamplePath + "storageclass.yaml") - createRBDStorageClass(f.ClientSet, f, nil, nil) }) By("create a PVC clone and bind it to an app", func() { v, err := f.ClientSet.Discovery().ServerVersion() if err != nil { - e2elog.Logf("failed to get server version with error %v", err) - Fail(err.Error()) + e2elog.Failf("failed to get server version with error %v", err) } // snapshot beta is only supported from v1.17+ if v.Major > "1" || (v.Major == "1" && v.Minor >= "17") { var wg sync.WaitGroup totalCount := 10 wg.Add(totalCount) - createRBDSnapshotClass(f) + err = createRBDSnapshotClass(f) + if err != nil { + e2elog.Failf("failed to create snapshotclass with error %v", err) + } pvc, err := loadPVC(pvcPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load PVC with error %v", err) } pvc.Namespace = f.UniqueName - e2elog.Logf("The PVC template %+v", pvc) err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - Fail(err.Error()) - } - // validate created backend rbd images - images := listRBDImages(f) - if len(images) != 1 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 1) - Fail("validate backend image failed") + e2elog.Failf("failed to create PVC with error %v", err) } + validateRBDImageCount(f, 1) snap := getSnapshot(snapshotPath) snap.Namespace = f.UniqueName snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name @@ -315,28 +387,22 @@ var _ = Describe("RBD", func() { s.Name = fmt.Sprintf("%s%d", f.UniqueName, n) err = createSnapshot(&s, deployTimeout) if err != nil { - e2elog.Logf("failed to create snapshot %v", err) - Fail(err.Error()) + e2elog.Failf("failed to create snapshot with error %v", err) } w.Done() }(&wg, i, snap) } wg.Wait() - imageList := listRBDImages(f) // total images in cluster is 1 parent rbd image+ total snaps - if 
len(imageList) != totalCount+1 { - e2elog.Logf("backend images not matching kubernetes pvc,snap count,image count %d kubernetes resource count %d", len(imageList), totalCount+1) - Fail("validate backend images failed") - } - + validateRBDImageCount(f, totalCount+1) pvcClone, err := loadPVC(pvcClonePath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load PVC with error %v", err) } appClone, err := loadApp(appClonePath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load application with error %v", err) } pvcClone.Namespace = f.UniqueName appClone.Namespace = f.UniqueName @@ -349,23 +415,17 @@ var _ = Describe("RBD", func() { name := fmt.Sprintf("%s%d", f.UniqueName, n) err = createPVCAndApp(name, f, &p, &a, deployTimeout) if err != nil { - e2elog.Logf("failed to create pvc and app %v", err) - Fail(err.Error()) + e2elog.Failf("failed to create PVC and application with error %v", err) } w.Done() }(&wg, i, *pvcClone, *appClone) } wg.Wait() - imageList = listRBDImages(f) // total images in cluster is 1 parent rbd image+ total // snaps+ total clones totalCloneCount := totalCount + totalCount + 1 - if len(imageList) != totalCloneCount { - e2elog.Logf("backend images not matching kubernetes resource count,image count %d kubernetes resource count %d", len(imageList), totalCount+totalCount+1) - Fail("validate backend images failed") - } - + validateRBDImageCount(f, totalCloneCount) wg.Add(totalCount) // delete clone and app for i := 0; i < totalCount; i++ { @@ -374,20 +434,16 @@ var _ = Describe("RBD", func() { p.Spec.DataSource.Name = name err = deletePVCAndApp(name, f, &p, &a) if err != nil { - e2elog.Logf("failed to delete pvc and app %v", err) - Fail(err.Error()) + e2elog.Failf("failed to delete PVC and app with error %v", err) } w.Done() }(&wg, i, *pvcClone, *appClone) } wg.Wait() - imageList = listRBDImages(f) - // total images in cluster is 1 parent rbd image+ total snaps - if len(imageList) != totalCount+1 { - e2elog.Logf("backend images not matching kubernetes pvc,snap count,image count %d kubernetes resource count %d", len(imageList), totalCount+1) - Fail("validate backend images failed") - } + // total images in cluster is 1 parent rbd image+ total + // snaps + validateRBDImageCount(f, totalCount+1) // create clones from different snapshosts and bind it to an // app wg.Add(totalCount) @@ -397,36 +453,26 @@ var _ = Describe("RBD", func() { p.Spec.DataSource.Name = name err = createPVCAndApp(name, f, &p, &a, deployTimeout) if err != nil { - e2elog.Logf("failed to create pvc and app %v", err) - Fail(err.Error()) + e2elog.Failf("failed to create PVC and app with error %v", err) } w.Done() }(&wg, i, *pvcClone, *appClone) } wg.Wait() - imageList = listRBDImages(f) // total images in cluster is 1 parent rbd image+ total // snaps+ total clones totalCloneCount = totalCount + totalCount + 1 - if len(imageList) != totalCloneCount { - e2elog.Logf("backend images not matching kubernetes resource count,image count %d kubernetes resource count %d", len(imageList), totalCount+totalCount+1) - Fail("validate backend images failed") - } - + validateRBDImageCount(f, totalCloneCount) // delete parent pvc err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete PVC with error %v", err) } - imageList = listRBDImages(f) - totalSnapCount := totalCount + totalCount // total images in cluster is total snaps+ total clones - if len(imageList) != totalSnapCount { - e2elog.Logf("backend images not matching 
kubernetes resource count,image count %d kubernetes resource count %d", len(imageList), totalCount+totalCount) - Fail("validate backend images failed") - } + totalSnapCount := totalCount + totalCount + validateRBDImageCount(f, totalSnapCount) wg.Add(totalCount) // delete snapshot for i := 0; i < totalCount; i++ { @@ -434,19 +480,14 @@ var _ = Describe("RBD", func() { s.Name = fmt.Sprintf("%s%d", f.UniqueName, n) err = deleteSnapshot(&s, deployTimeout) if err != nil { - e2elog.Logf("failed to delete snapshot %v", err) - Fail(err.Error()) + e2elog.Failf("failed to delete snapshot with error %v", err) } w.Done() }(&wg, i, snap) } wg.Wait() - imageList = listRBDImages(f) - if len(imageList) != totalCount { - e2elog.Logf("backend images not matching kubernetes snap count,image count %d kubernetes resource count %d", len(imageList), totalCount) - Fail("validate backend images failed") - } + validateRBDImageCount(f, totalCount) wg.Add(totalCount) // delete clone and app for i := 0; i < totalCount; i++ { @@ -455,27 +496,21 @@ var _ = Describe("RBD", func() { p.Spec.DataSource.Name = name err = deletePVCAndApp(name, f, &p, &a) if err != nil { - e2elog.Logf("failed to delete pvc and app %v", err) - Fail(err.Error()) + e2elog.Failf("failed to delete PVC and application with error %v", err) } w.Done() }(&wg, i, *pvcClone, *appClone) } wg.Wait() // validate created backend rbd images - images = listRBDImages(f) - if len(images) != 0 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 0) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 0) } }) By("create a PVC-PVC clone and bind it to an app", func() { v, err := f.ClientSet.Discovery().ServerVersion() if err != nil { - e2elog.Logf("failed to get server version with error %v", err) - Fail(err.Error()) + e2elog.Failf("failed to get server version with error %v", err) } // pvc clone is only supported from v1.16+ if v.Major > "1" || (v.Major == "1" && v.Minor >= "16") { @@ -483,30 +518,25 @@ var _ = Describe("RBD", func() { totalCount := 10 pvc, err := loadPVC(pvcPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load PVC with error %v", err) } pvc.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create PVC with error %v", err) } // validate created backend rbd images - images := listRBDImages(f) - if len(images) != 1 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 1) - Fail("validate backend image failed") - } - + validateRBDImageCount(f, 1) pvcClone, err := loadPVC(pvcSmartClonePath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load PVC with error %v", err) } pvcClone.Spec.DataSource.Name = pvc.Name pvcClone.Namespace = f.UniqueName appClone, err := loadApp(appSmartClonePath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load application with error %v", err) } appClone.Namespace = f.UniqueName wg.Add(totalCount) @@ -516,35 +546,25 @@ var _ = Describe("RBD", func() { name := fmt.Sprintf("%s%d", f.UniqueName, n) err = createPVCAndApp(name, f, &p, &a, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create PVC with error %v", err) } w.Done() }(&wg, i, *pvcClone, *appClone) } wg.Wait() - images = listRBDImages(f) // total images in cluster is 1 parent rbd image+ total // temporary clone+ total clones totalCloneCount := totalCount + totalCount + 1 - if len(images) != totalCloneCount { - e2elog.Logf("backend 
images not matching kubernetes resource count,image count %d kubernetes resource count %d", len(images), totalCloneCount) - Fail("validate backend images failed") - } - + validateRBDImageCount(f, totalCloneCount) // delete parent pvc err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete PVC with error %v", err) } - images = listRBDImages(f) totalCloneCount = totalCount + totalCount - // total images in cluster is total snaps+ total clones - if len(images) != totalCloneCount { - e2elog.Logf("backend images not matching kubernetes resource count,image count %d kubernetes resource count %d", len(images), totalCloneCount) - Fail("validate backend images failed") - } + validateRBDImageCount(f, totalCloneCount) wg.Add(totalCount) // delete clone and app for i := 0; i < totalCount; i++ { @@ -553,210 +573,186 @@ var _ = Describe("RBD", func() { p.Spec.DataSource.Name = name err = deletePVCAndApp(name, f, &p, &a) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete PVC and application with error %v", err) } w.Done() }(&wg, i, *pvcClone, *appClone) } wg.Wait() - images = listRBDImages(f) - if len(images) != 0 { - e2elog.Logf("backend images not matching kubernetes snap count,image count %d kubernetes resource count %d", len(images), 0) - Fail("validate backend images failed") - } + validateRBDImageCount(f, 0) } }) - By("create a block type PVC and Bind it to an app", func() { - validatePVCAndAppBinding(rawPvcPath, rawAppPath, f) + By("create a block type PVC and bind it to an app", func() { + err := validatePVCAndAppBinding(rawPvcPath, rawAppPath, f) + if err != nil { + e2elog.Failf("failed to validate pvc and application binding with error %v", err) + } }) By("create/delete multiple PVCs and Apps", func() { totalCount := 2 pvc, err := loadPVC(pvcPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load PVC with error %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load application with error %v", err) } app.Namespace = f.UniqueName - // create pvc and app + // create PVC and app for i := 0; i < totalCount; i++ { name := fmt.Sprintf("%s%d", f.UniqueName, i) err := createPVCAndApp(name, f, pvc, app, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create PVC and application with error %v", err) } } // validate created backend rbd images - images := listRBDImages(f) - if len(images) != totalCount { - e2elog.Logf("backend image creation not matching pvc count, image count = % pvc count %d", len(images), totalCount) - Fail("validate multiple pvc failed") - } - - // delete pvc and app + validateRBDImageCount(f, totalCount) + // delete PVC and app for i := 0; i < totalCount; i++ { name := fmt.Sprintf("%s%d", f.UniqueName, i) err := deletePVCAndApp(name, f, pvc, app) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete PVC and application with error %v", err) } } // validate created backend rbd images - images = listRBDImages(f) - if len(images) > 0 { - e2elog.Logf("left out rbd backend images count %d", len(images)) - Fail("validate multiple pvc failed") - } + validateRBDImageCount(f, 0) }) - By("check data persist after recreating pod with same pvc", func() { + By("check data persist after recreating pod", func() { err := checkDataPersist(pvcPath, appPath, f) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to check data persist with error %v", err) } // validate 
created backend rbd images - images := listRBDImages(f) - if len(images) != 0 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 0) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 0) }) By("Resize Filesystem PVC and check application directory size", func() { v, err := f.ClientSet.Discovery().ServerVersion() if err != nil { - e2elog.Logf("failed to get server version with error %v", err) - Fail(err.Error()) + e2elog.Failf("failed to get server version with error %v", err) } // Resize 0.3.0 is only supported from v1.15+ if v.Major > "1" || (v.Major == "1" && v.Minor >= "15") { err := resizePVCAndValidateSize(pvcPath, appPath, f) if err != nil { - e2elog.Logf("failed to resize filesystem PVC %v", err) - Fail(err.Error()) + e2elog.Failf("failed to resize filesystem PVC with error %v", err) } - deleteResource(rbdExamplePath + "storageclass.yaml") - createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"csi.storage.k8s.io/fstype": "xfs"}) + err = deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"csi.storage.k8s.io/fstype": "xfs"}) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } err = resizePVCAndValidateSize(pvcPath, appPath, f) if err != nil { - e2elog.Logf("failed to resize filesystem PVC %v", err) - Fail(err.Error()) + e2elog.Failf("failed to resize filesystem PVC with error %v", err) } // validate created backend rbd images - images := listRBDImages(f) - if len(images) != 0 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 0) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 0) } }) By("Resize Block PVC and check Device size", func() { v, err := f.ClientSet.Discovery().ServerVersion() if err != nil { - e2elog.Logf("failed to get server version with error %v", err) - Fail(err.Error()) + e2elog.Failf("failed to get server version with error %v", err) } // Block PVC resize is supported in kubernetes 1.16+ if v.Major > "1" || (v.Major == "1" && v.Minor >= "16") { err := resizePVCAndValidateSize(rawPvcPath, rawAppPath, f) if err != nil { - e2elog.Logf("failed to resize block PVC %v", err) - Fail(err.Error()) + e2elog.Failf("failed to resize block PVC with error %v", err) } // validate created backend rbd images - images := listRBDImages(f) - if len(images) != 0 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 0) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 0) } }) By("Test unmount after nodeplugin restart", func() { pvc, err := loadPVC(pvcPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load PVC with error %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load application with error %v", err) } app.Namespace = f.UniqueName err = createPVCAndApp("", f, pvc, app, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create PVC and application with error %v", err) } // validate created backend rbd images - images := listRBDImages(f) - if len(images) != 1 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 1) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 1) // delete rbd nodeplugin pods err = deletePodWithLabel("app=csi-rbdplugin", cephCSINamespace, false) 
if err != nil { - Fail(err.Error()) + e2elog.Failf("fail to delete pod with error %v", err) } // wait for nodeplugin pods to come up err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("timeout waiting for daemonset pods with error %v", err) } err = deletePVCAndApp("", f, pvc, app) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete PVC and application with error %v", err) } // validate created backend rbd images - images = listRBDImages(f) - if len(images) != 0 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 0) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 0) }) By("create PVC in storageClass with volumeNamePrefix", func() { volumeNamePrefix := "foo-bar-" - deleteResource(rbdExamplePath + "storageclass.yaml") - createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"volumeNamePrefix": volumeNamePrefix}) - + err := deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, nil, map[string]string{"volumeNamePrefix": volumeNamePrefix}) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } // set up PVC pvc, err := loadPVC(pvcPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load PVC with error %v", err) } pvc.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create PVC with error %v", err) } // validate created backend rbd images - images := listRBDImages(f) - if len(images) != 1 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 1) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 1) // list RBD images and check if one of them has the same prefix foundIt := false - for _, imgName := range listRBDImages(f) { + images, err := listRBDImages(f) + if err != nil { + e2elog.Failf("failed to list rbd images with error %v", err) + } + for _, imgName := range images { fmt.Printf("Checking prefix on %s\n", imgName) if strings.HasPrefix(imgName, volumeNamePrefix) { foundIt = true @@ -767,223 +763,243 @@ var _ = Describe("RBD", func() { // clean up after ourselves err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete PVC with error %v", err) } // validate created backend rbd images - images = listRBDImages(f) - if len(images) != 0 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 0) - Fail("validate backend image failed") - } - deleteResource(rbdExamplePath + "storageclass.yaml") - createRBDStorageClass(f.ClientSet, f, nil, nil) + validateRBDImageCount(f, 0) + err = deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, nil, nil) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } if !foundIt { - Fail(fmt.Sprintf("could not find image with prefix %s", volumeNamePrefix)) + e2elog.Failf("could not find image with prefix %s", volumeNamePrefix) } }) By("validate RBD static FileSystem PVC", func() { err := validateRBDStaticPV(f, appPath, false) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to validate rbd static pv with error %v", err) } 
// validate created backend rbd images - images := listRBDImages(f) - if len(images) != 0 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 0) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 0) }) By("validate RBD static Block PVC", func() { err := validateRBDStaticPV(f, rawAppPath, true) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to validate rbd block pv with error %v", err) } // validate created backend rbd images - images := listRBDImages(f) - if len(images) != 0 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 0) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 0) }) By("validate mount options in app pod", func() { mountFlags := []string{"discard"} err := checkMountOptions(pvcPath, appPath, f, mountFlags) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to check mount options with error %v", err) } // validate created backend rbd images - images := listRBDImages(f) - if len(images) != 0 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 0) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 0) }) By("creating an app with a PVC, using a topology constrained StorageClass", func() { By("checking node has required CSI topology labels set", func() { - checkNodeHasLabel(f.ClientSet, nodeCSIRegionLabel, regionValue) - checkNodeHasLabel(f.ClientSet, nodeCSIZoneLabel, zoneValue) + err := checkNodeHasLabel(f.ClientSet, nodeCSIRegionLabel, regionValue) + if err != nil { + e2elog.Failf("failed to check node label with error %v", err) + } + err = checkNodeHasLabel(f.ClientSet, nodeCSIZoneLabel, zoneValue) + if err != nil { + e2elog.Failf("failed to check node label with error %v", err) + } }) By("creating a StorageClass with delayed binding mode and CSI topology parameter") - deleteResource(rbdExamplePath + "storageclass.yaml") + err := deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } topologyConstraint := "[{\"poolName\":\"" + rbdTopologyPool + "\",\"domainSegments\":" + "[{\"domainLabel\":\"region\",\"value\":\"" + regionValue + "\"}," + "{\"domainLabel\":\"zone\",\"value\":\"" + zoneValue + "\"}]}]" - createRBDStorageClass(f.ClientSet, f, + err = createRBDStorageClass(f.ClientSet, f, map[string]string{"volumeBindingMode": "WaitForFirstConsumer"}, map[string]string{"topologyConstrainedPools": topologyConstraint}) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } By("creating an app using a PV from the delayed binding mode StorageClass") - pvc, app := createPVCAndAppBinding(pvcPath, appPath, f, 0) - - By("ensuring created PV has required node selector values populated") - checkPVSelectorValuesForPVC(f, pvc) - - By("ensuring created PV has its image in the topology specific pool") - err := checkPVCImageInPool(f, pvc, rbdTopologyPool) + pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, 0) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create PVC and application with error %v", err) + } + By("ensuring created PV has required node selector values populated") + err = checkPVSelectorValuesForPVC(f, pvc) + if err != nil { + e2elog.Failf("failed to check pv selector values with error %v", err) + } + By("ensuring created PV has its image in the topology specific pool") + err = checkPVCImageInPool(f, pvc, rbdTopologyPool) + if err != nil { + e2elog.Failf("failed to check image 
in pool with error %v", err) } By("ensuring created PV has its image journal in the topology specific pool") err = checkPVCImageJournalInPool(f, pvc, rbdTopologyPool) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to check image journal with error %v", err) } By("ensuring created PV has its CSI journal in the CSI journal specific pool") err = checkPVCCSIJournalInPool(f, pvc, "replicapool") if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to check csi journal in pool with error %v", err) } err = deletePVCAndApp("", f, pvc, app) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete PVC and application with error %v", err) } By("checking if data pool parameter is honored", func() { - deleteResource(rbdExamplePath + "storageclass.yaml") + err = deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } topologyConstraint := "[{\"poolName\":\"" + rbdTopologyPool + "\",\"dataPool\":\"" + rbdTopologyDataPool + "\",\"domainSegments\":" + "[{\"domainLabel\":\"region\",\"value\":\"" + regionValue + "\"}," + "{\"domainLabel\":\"zone\",\"value\":\"" + zoneValue + "\"}]}]" - createRBDStorageClass(f.ClientSet, f, + err = createRBDStorageClass(f.ClientSet, f, map[string]string{"volumeBindingMode": "WaitForFirstConsumer"}, map[string]string{"topologyConstrainedPools": topologyConstraint}) - + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } By("creating an app using a PV from the delayed binding mode StorageClass with a data pool") - pvc, app = createPVCAndAppBinding(pvcPath, appPath, f, 0) + pvc, app, err = createPVCAndAppBinding(pvcPath, appPath, f, 0) + if err != nil { + e2elog.Failf("failed to create PVC and application with error %v", err) + } By("ensuring created PV has its image in the topology specific pool") err = checkPVCImageInPool(f, pvc, rbdTopologyPool) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to check pvc image in pool with error %v", err) } By("ensuring created image has the right data pool parameter set") err = checkPVCDataPoolForImageInPool(f, pvc, rbdTopologyPool, rbdTopologyDataPool) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to check data pool for image with error %v", err) } // cleanup and undo changes made by the test err = deletePVCAndApp("", f, pvc, app) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete PVC and application with error %v", err) } }) // cleanup and undo changes made by the test - deleteResource(rbdExamplePath + "storageclass.yaml") - createRBDStorageClass(f.ClientSet, f, nil, nil) + err = deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, nil, nil) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) + } }) // Mount pvc to pod with invalid mount option,expected that // mounting will fail By("Mount pvc to pod with invalid mount option", func() { - deleteResource(rbdExamplePath + "storageclass.yaml") - createRBDStorageClass(f.ClientSet, f, map[string]string{rbdmountOptions: "debug,invalidOption"}, nil) + err := deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, map[string]string{rbdmountOptions: "debug,invalidOption"}, nil) + if err != nil { + 
e2elog.Failf("failed to create storageclass with error %v", err) + } pvc, err := loadPVC(pvcPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load PVC with error %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load application with error %v", err) } app.Namespace = f.UniqueName err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create PVC with error %v", err) } // validate created backend rbd images - images := listRBDImages(f) - if len(images) != 1 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 1) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 1) + // create an app and wait for 1 min for it to go to running state err = createApp(f.ClientSet, app, 1) if err == nil { - Fail("application should not go to running state due to invalid mount option") + e2elog.Failf("application should not go to running state due to invalid mount option") } err = deletePVCAndApp("", f, pvc, app) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete PVC and application with error %v", err) } // validate created backend rbd images - images = listRBDImages(f) - if len(images) != 0 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 0) - Fail("validate backend image failed") + validateRBDImageCount(f, 0) + err = deleteResource(rbdExamplePath + "storageclass.yaml") + if err != nil { + e2elog.Failf("failed to delete storageclass with error %v", err) + } + err = createRBDStorageClass(f.ClientSet, f, nil, nil) + if err != nil { + e2elog.Failf("failed to create storageclass with error %v", err) } - deleteResource(rbdExamplePath + "storageclass.yaml") - createRBDStorageClass(f.ClientSet, f, nil, nil) }) By("create ROX PVC clone and mount it to multiple pods", func() { v, err := f.ClientSet.Discovery().ServerVersion() if err != nil { - e2elog.Logf("failed to get server version with error %v", err) - Fail(err.Error()) + e2elog.Failf("failed to get server version with error %v", err) } // snapshot beta is only supported from v1.17+ if v.Major > "1" || (v.Major == "1" && v.Minor >= "17") { - // create pvc and bind it to an app + // create PVC and bind it to an app pvc, err := loadPVC(pvcPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load PVC with error %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load application with error %v", err) } app.Namespace = f.UniqueName err = createPVCAndApp("", f, pvc, app, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create PVC and application with error %v", err) } // validate created backend rbd images - images := listRBDImages(f) - if len(images) != 1 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 1) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 1) // delete pod as we should not create snapshot for in-use pvc err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete application with error %v", err) } snap := getSnapshot(snapshotPath) @@ -992,19 +1008,15 @@ var _ = Describe("RBD", func() { err = createSnapshot(&snap, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create snapshot with error %v", err) } // validate created 
backend rbd images - images = listRBDImages(f) // parent PVC + snapshot totalImages := 2 - if len(images) != totalImages { - e2elog.Logf("backend image count %d expected image count %d", len(images), totalImages) - Fail("validate backend image failed") - } + validateRBDImageCount(f, totalImages) pvcClone, err := loadPVC(pvcClonePath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load PVC with error %v", err) } // create clone PVC as ROX @@ -1012,26 +1024,23 @@ var _ = Describe("RBD", func() { pvcClone.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany} err = createPVCAndvalidatePV(f.ClientSet, pvcClone, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create PVC with error %v", err) } // validate created backend rbd images // parent pvc+ snapshot + clone totalImages = 3 - images = listRBDImages(f) - if len(images) != totalImages { - e2elog.Logf("backend image count %d expected image count %d", len(images), totalImages) - Fail("validate backend image failed") - } + validateRBDImageCount(f, totalImages) + appClone, err := loadApp(appClonePath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load application with error %v", err) } totalCount := 2 appClone.Namespace = f.UniqueName appClone.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcClone.Name - // create pvc and app + // create PVC and app for i := 0; i < totalCount; i++ { name := fmt.Sprintf("%s%d", f.UniqueName, i) label := map[string]string{ @@ -1041,7 +1050,7 @@ var _ = Describe("RBD", func() { appClone.Name = name err = createApp(f.ClientSet, appClone, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create application with error %v", err) } } @@ -1055,7 +1064,7 @@ var _ = Describe("RBD", func() { _, stdErr := execCommandInPodAndAllowFail(f, fmt.Sprintf("echo 'Hello World' > %s", filePath), appClone.Namespace, &opt) readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath) if !strings.Contains(stdErr, readOnlyErr) { - Fail(stdErr) + e2elog.Failf(stdErr) } } @@ -1065,66 +1074,71 @@ var _ = Describe("RBD", func() { appClone.Name = name err = deletePod(appClone.Name, appClone.Namespace, f.ClientSet, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete application with error %v", err) } } - // delete pvc clone + // delete PVC clone err = deletePVCAndValidatePV(f.ClientSet, pvcClone, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete PVC with error %v", err) } // delete snapshot err = deleteSnapshot(&snap, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete snapshot with error %v", err) } // delete parent pvc err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete PVC with error %v", err) } // validate created backend rbd images - images = listRBDImages(f) - if len(images) != 0 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 0) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 0) } }) By("ensuring all operations will work within a rados namespace", func() { updateConfigMap := func(radosNS string) { radosNamespace = radosNS - deleteConfigMap(rbdDirPath) - createConfigMap(rbdDirPath, f.ClientSet, f) - createRadosNamespace(f) - + err := deleteConfigMap(rbdDirPath) + if err != nil { + e2elog.Failf("failed to delete configmap with Error: %v", err) + } + err = createConfigMap(rbdDirPath, 
f.ClientSet, f) + if err != nil { + e2elog.Failf("failed to create configmap with error %v", err) + } + err = createRadosNamespace(f) + if err != nil { + e2elog.Failf("failed to create rados namespace with error %v", err) + } // delete csi pods - err := deletePodWithLabel("app in (ceph-csi-rbd, csi-rbdplugin, csi-rbdplugin-provisioner)", + err = deletePodWithLabel("app in (ceph-csi-rbd, csi-rbdplugin, csi-rbdplugin-provisioner)", cephCSINamespace, false) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete pods with labels with error %v", err) } // wait for csi pods to come up err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("timeout waiting for daemonset pods with error %v", err) } err = waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("timeout waiting for deployment to be in running state with error %v", err) } } updateConfigMap("e2e-ns") - // Create a PVC and Bind it to an app within the namesapce - validatePVCAndAppBinding(pvcPath, appPath, f) - + // Create a PVC and bind it to an app within the namespace + err := validatePVCAndAppBinding(pvcPath, appPath, f) + if err != nil { + e2elog.Failf("failed to validate pvc and application binding with error %v", err) + } v, err := f.ClientSet.Discovery().ServerVersion() if err != nil { - e2elog.Logf("failed to get server version with error %v", err) - Fail(err.Error()) + e2elog.Failf("failed to get server version with error %v", err) } // Resize Block PVC and check Device size within the namespace @@ -1132,8 +1146,7 @@ var _ = Describe("RBD", func() { if v.Major > "1" || (v.Major == "1" && v.Minor >= "16") { err = resizePVCAndValidateSize(rawPvcPath, rawAppPath, f) if err != nil { - e2elog.Logf("failed to resize block PVC %v", err) - Fail(err.Error()) + e2elog.Failf("failed to resize block PVC with error %v", err) } } @@ -1142,44 +1155,37 @@ var _ = Describe("RBD", func() { if v.Major > "1" || (v.Major == "1" && v.Minor >= "17") { pvc, err := loadPVC(pvcPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load PVC with error %v", err) } pvc.Namespace = f.UniqueName - e2elog.Logf("The PVC template %+v", pvc) err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create PVC with error %v", err) } // validate created backend rbd images - images := listRBDImages(f) - if len(images) != 1 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 1) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 1) + snap := getSnapshot(snapshotPath) snap.Namespace = f.UniqueName snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name err = createSnapshot(&snap, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create snapshot with error %v", err) } - expectedImages := len(images) + 1 - images = listRBDImages(f) - if len(images) != expectedImages { - e2elog.Logf("backend images not matching kubernetes resource count,image count %d kubernetes resource count %d", len(images), 2) - Fail("validate backend images failed") + validateRBDImageCount(f, 2) + + err = validatePVCAndAppBinding(pvcClonePath, appClonePath, f) + if err != nil { + e2elog.Failf("failed to validate pvc and application binding with error %v", err) } - - validatePVCAndAppBinding(pvcClonePath, appClonePath, f) - err = deleteSnapshot(&snap, 
deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete snapshot with error %v", err) } err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete PVC with error %v", err) } } @@ -1187,17 +1193,17 @@ var _ = Describe("RBD", func() { }) By("Mount pvc as readonly in pod", func() { - // create pvc and bind it to an app + // create PVC and bind it to an app pvc, err := loadPVC(pvcPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load PVC with error %v", err) } pvc.Namespace = f.UniqueName app, err := loadApp(appPath) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to load application with error %v", err) } app.Namespace = f.UniqueName @@ -1209,14 +1215,10 @@ var _ = Describe("RBD", func() { app.Spec.Volumes[0].PersistentVolumeClaim.ReadOnly = true err = createPVCAndApp("", f, pvc, app, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create PVC and application with error %v", err) } // validate created backend rbd images - images := listRBDImages(f) - if len(images) != 1 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 1) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 1) opt := metav1.ListOptions{ LabelSelector: fmt.Sprintf("app=%s", app.Name), @@ -1226,28 +1228,24 @@ var _ = Describe("RBD", func() { _, stdErr := execCommandInPodAndAllowFail(f, fmt.Sprintf("echo 'Hello World' > %s", filePath), app.Namespace, &opt) readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath) if !strings.Contains(stdErr, readOnlyErr) { - Fail(stdErr) + e2elog.Failf(stdErr) } - // delete pvc and app + // delete PVC and app err = deletePVCAndApp("", f, pvc, app) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete PVC and application with error %v", err) } // validate created backend rbd images - images = listRBDImages(f) - if len(images) != 0 { - e2elog.Logf("backend image count %d expected image count %d", len(images), 0) - Fail("validate backend image failed") - } + validateRBDImageCount(f, 0) }) // Make sure this should be last testcase in this file, because // it deletes pool - By("Create a PVC and Delete PVC when backend pool deleted", func() { + By("Create a PVC and delete PVC when backend pool deleted", func() { err := pvcDeleteWhenPoolNotFound(pvcPath, false, f) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete PVC when pool not found with error %v", err) } }) }) diff --git a/e2e/rbd_helper.go b/e2e/rbd_helper.go new file mode 100644 index 000000000..9f7813c56 --- /dev/null +++ b/e2e/rbd_helper.go @@ -0,0 +1,390 @@ +package e2e + +import ( + "context" + "encoding/json" + "fmt" + "regexp" + "strings" + + v1 "k8s.io/api/core/v1" + scv1 "k8s.io/api/storage/v1" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/kubernetes/test/e2e/framework" + e2elog "k8s.io/kubernetes/test/e2e/framework/log" +) + +func imageSpec(pool, image string) string { + if radosNamespace != "" { + return pool + "/" + radosNamespace + "/" + image + } + return pool + "/" + image +} + +func rbdOptions(pool string) string { + if radosNamespace != "" { + return "--pool=" + pool + " --namespace " + radosNamespace + } + return "--pool=" + pool +} + +func createRBDStorageClass(c kubernetes.Interface, f *framework.Framework, scOptions, parameters map[string]string) error { + scPath := fmt.Sprintf("%s/%s", rbdExamplePath, "storageclass.yaml") + 
sc, err := getStorageClass(scPath)
+	if err != nil {
+		return err
+	}
+	sc.Parameters["pool"] = defaultRBDPool
+	sc.Parameters["csi.storage.k8s.io/provisioner-secret-namespace"] = rookNamespace
+	sc.Parameters["csi.storage.k8s.io/provisioner-secret-name"] = rbdProvisionerSecretName
+
+	sc.Parameters["csi.storage.k8s.io/controller-expand-secret-namespace"] = rookNamespace
+	sc.Parameters["csi.storage.k8s.io/controller-expand-secret-name"] = rbdProvisionerSecretName
+
+	sc.Parameters["csi.storage.k8s.io/node-stage-secret-namespace"] = rookNamespace
+	sc.Parameters["csi.storage.k8s.io/node-stage-secret-name"] = rbdNodePluginSecretName
+
+	fsID, stdErr, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
+	if err != nil {
+		return err
+	}
+	if stdErr != "" {
+		return fmt.Errorf("error getting fsid %v", stdErr)
+	}
+	// remove new line present in fsID
+	fsID = strings.Trim(fsID, "\n")
+
+	sc.Parameters["clusterID"] = fsID
+	for k, v := range parameters {
+		sc.Parameters[k] = v
+	}
+	sc.Namespace = cephCSINamespace
+
+	if scOptions["volumeBindingMode"] == "WaitForFirstConsumer" {
+		value := scv1.VolumeBindingWaitForFirstConsumer
+		sc.VolumeBindingMode = &value
+	}
+
+	// comma separated mount options
+	if opt, ok := scOptions[rbdmountOptions]; ok {
+		mOpt := strings.Split(opt, ",")
+		sc.MountOptions = append(sc.MountOptions, mOpt...)
+	}
+	_, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
+	return err
+}
+
+func createRadosNamespace(f *framework.Framework) error {
+	stdOut, stdErr, err := execCommandInToolBoxPod(f,
+		fmt.Sprintf("rbd namespace ls --pool=%s", defaultRBDPool), rookNamespace)
+	if err != nil {
+		return err
+	}
+	if stdErr != "" {
+		return fmt.Errorf("error listing rbd namespace %v", stdErr)
+	}
+	if !strings.Contains(stdOut, radosNamespace) {
+		_, stdErr, err = execCommandInToolBoxPod(f,
+			fmt.Sprintf("rbd namespace create %s", rbdOptions(defaultRBDPool)), rookNamespace)
+		if err != nil {
+			return err
+		}
+		if stdErr != "" {
+			return fmt.Errorf("error creating rbd namespace %v", stdErr)
+		}
+	}
+	stdOut, stdErr, err = execCommandInToolBoxPod(f,
+		fmt.Sprintf("rbd namespace ls --pool=%s", rbdTopologyPool), rookNamespace)
+	if err != nil {
+		return err
+	}
+	if stdErr != "" {
+		return fmt.Errorf("error listing rbd namespace %v", stdErr)
+	}
+
+	if !strings.Contains(stdOut, radosNamespace) {
+		_, stdErr, err = execCommandInToolBoxPod(f,
+			fmt.Sprintf("rbd namespace create %s", rbdOptions(rbdTopologyPool)), rookNamespace)
+		if err != nil {
+			return err
+		}
+		if stdErr != "" {
+			return fmt.Errorf("error creating rbd namespace %v", stdErr)
+		}
+	}
+	return nil
+}
+
+func createRBDSecret(c kubernetes.Interface, f *framework.Framework) error {
+	scPath := fmt.Sprintf("%s/%s", rbdExamplePath, "secret.yaml")
+	sc, err := getSecret(scPath)
+	if err != nil {
+		return err
+	}
+	adminKey, stdErr, err := execCommandInToolBoxPod(f, "ceph auth get-key client.admin", rookNamespace)
+	if err != nil {
+		return err
+	}
+	if stdErr != "" {
+		return fmt.Errorf("error getting admin key %v", stdErr)
+	}
+	sc.StringData["userID"] = adminUser
+	sc.StringData["userKey"] = adminKey
+	sc.Namespace = cephCSINamespace
+	_, err = c.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &sc, metav1.CreateOptions{})
+	if err != nil {
+		return err
+	}
+
+	err = updateSecretForEncryption(c)
+	return err
+}
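
For orientation, the error-returning helpers above are meant to be composed by each suite's setup rather than asserting internally; a minimal sketch of that wiring (assuming a ginkgo framework instance f, mirroring the upgrade suites later in this patch):

	// Sketch only: typical BeforeEach wiring for the helpers above. Passing
	// nil scOptions/parameters keeps the defaults set in createRBDStorageClass.
	err := createRBDSecret(f.ClientSet, f)
	if err != nil {
		e2elog.Failf("failed to create secret with error %v", err)
	}
	err = createRBDStorageClass(f.ClientSet, f, nil, nil)
	if err != nil {
		e2elog.Failf("failed to create storageclass with error %v", err)
	}
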
+
+type imageInfoFromPVC struct {
+	imageID         string
+	imageName       string
+	csiVolumeHandle string
+	pvName          string
+}
+
+// getImageInfoFromPVC reads volume handle of the bound PV to the passed in PVC,
+// and returns imageInfoFromPVC or error.
+func getImageInfoFromPVC(pvcNamespace, pvcName string, f *framework.Framework) (imageInfoFromPVC, error) {
+	var imageData imageInfoFromPVC
+
+	c := f.ClientSet.CoreV1()
+	pvc, err := c.PersistentVolumeClaims(pvcNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
+	if err != nil {
+		return imageData, err
+	}
+
+	pv, err := c.PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
+	if err != nil {
+		return imageData, err
+	}
+
+	imageIDRegex := regexp.MustCompile(`(\w+\-?){5}$`)
+	imageID := imageIDRegex.FindString(pv.Spec.CSI.VolumeHandle)
+
+	imageData = imageInfoFromPVC{
+		imageID:         imageID,
+		imageName:       fmt.Sprintf("csi-vol-%s", imageID),
+		csiVolumeHandle: pv.Spec.CSI.VolumeHandle,
+		pvName:          pv.Name,
+	}
+	return imageData, nil
+}
+
+func getImageMeta(rbdImageSpec, metaKey string, f *framework.Framework) (string, error) {
+	cmd := fmt.Sprintf("rbd image-meta get %s %s", rbdImageSpec, metaKey)
+	stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
+	if err != nil {
+		return "", err
+	}
+	if stdErr != "" {
+		return strings.TrimSpace(stdOut), fmt.Errorf(stdErr)
+	}
+	return strings.TrimSpace(stdOut), nil
+}
+
+func validateEncryptedPVCAndAppBinding(pvcPath, appPath, kms string, f *framework.Framework) error {
+	pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout)
+	if err != nil {
+		return err
+	}
+	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
+	if err != nil {
+		return err
+	}
+	rbdImageSpec := imageSpec(defaultRBDPool, imageData.imageName)
+	encryptedState, err := getImageMeta(rbdImageSpec, ".rbd.csi.ceph.com/encrypted", f)
+	if err != nil {
+		return err
+	}
+	if encryptedState != "encrypted" {
+		return fmt.Errorf("%v not equal to encrypted", encryptedState)
+	}
+
+	volumeMountPath := app.Spec.Containers[0].VolumeMounts[0].MountPath
+	mountType, err := getMountType(app.Name, app.Namespace, volumeMountPath, f)
+	if err != nil {
+		return err
+	}
+	if mountType != "crypt" {
+		return fmt.Errorf("%v not equal to crypt", mountType)
+	}
+
+	if kms == "vault" {
+		// check new passphrase created
+		_, stdErr := readVaultSecret(imageData.csiVolumeHandle, f)
+		if stdErr != "" {
+			return fmt.Errorf("failed to read passphrase from vault: %s", stdErr)
+		}
+	}
+
+	err = deletePVCAndApp("", f, pvc, app)
+	if err != nil {
+		return err
+	}
+
+	if kms == "vault" {
+		// check passphrase deleted
+		stdOut, _ := readVaultSecret(imageData.csiVolumeHandle, f)
+		if stdOut != "" {
+			return fmt.Errorf("passphrase found in vault while it should be deleted: %s", stdOut)
+		}
+	}
+	return nil
+}
+
+func listRBDImages(f *framework.Framework) ([]string, error) {
+	var imgInfos []string
+
+	stdout, stdErr, err := execCommandInToolBoxPod(f,
+		fmt.Sprintf("rbd ls --format=json %s", rbdOptions(defaultRBDPool)), rookNamespace)
+	if err != nil {
+		return imgInfos, err
+	}
+	if stdErr != "" {
+		return imgInfos, fmt.Errorf("failed to list images %v", stdErr)
+	}
+
+	err = json.Unmarshal([]byte(stdout), &imgInfos)
+	if err != nil {
+		return imgInfos, err
+	}
+	return imgInfos, nil
+}
+
+func deleteBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeClaim) error {
+	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
+	if err != nil {
+		return err
+	}
+
+	cmd := fmt.Sprintf("rbd rm %s %s", rbdOptions(defaultRBDPool), imageData.imageName)
+	_, _, err = execCommandInToolBoxPod(f, cmd, rookNamespace)
+	return err
+}
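
The image helpers above hinge on the trailing UUID of the CSI volume handle; a self-contained illustration of the regex used by getImageInfoFromPVC (the handle value here is hypothetical):

	package main

	import (
		"fmt"
		"regexp"
	)

	func main() {
		// The last five dash-separated groups of a volume handle form the
		// image UUID; the backend RBD image is then named "csi-vol-<UUID>".
		handle := "0001-0009-rook-ceph-0000000000000001-492570a8-1111-2222-3333-a3a805b2f8ad"
		imageID := regexp.MustCompile(`(\w+\-?){5}$`).FindString(handle)
		fmt.Println("csi-vol-" + imageID) // csi-vol-492570a8-1111-2222-3333-a3a805b2f8ad
	}
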
+
+func deletePool(name string, cephfs bool, f *framework.Framework) error {
+	var cmds = []string{}
+	if cephfs {
+		// ceph fs fail
+		// ceph fs rm myfs --yes-i-really-mean-it
+		// ceph osd pool delete myfs-metadata myfs-metadata
+		// --yes-i-really-really-mean-it
+		// ceph osd pool delete myfs-data0 myfs-data0
+		// --yes-i-really-really-mean-it
+		cmds = append(cmds, fmt.Sprintf("ceph fs fail %s", name),
+			fmt.Sprintf("ceph fs rm %s --yes-i-really-mean-it", name),
+			fmt.Sprintf("ceph osd pool delete %s-metadata %s-metadata --yes-i-really-really-mean-it", name, name),
+			fmt.Sprintf("ceph osd pool delete %s-data0 %s-data0 --yes-i-really-really-mean-it", name, name))
+	} else {
+		// ceph osd pool delete replicapool replicapool
+		// --yes-i-really-really-mean-it
+		cmds = append(cmds, fmt.Sprintf("ceph osd pool delete %s %s --yes-i-really-really-mean-it", name, name))
+	}
+
+	for _, cmd := range cmds {
+		// discard stdErr as some commands print warnings to stdErr
+		_, _, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func getPVCImageInfoInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) (string, error) {
+	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
+	if err != nil {
+		return "", err
+	}
+
+	stdOut, stdErr, err := execCommandInToolBoxPod(f,
+		fmt.Sprintf("rbd info %s", imageSpec(pool, imageData.imageName)), rookNamespace)
+	if err != nil {
+		return "", err
+	}
+	if stdErr != "" {
+		return "", fmt.Errorf("failed to get rbd info %v", stdErr)
+	}
+
+	if radosNamespace != "" {
+		e2elog.Logf("found image %s in pool %s namespace %s", imageData.imageName, pool, radosNamespace)
+	} else {
+		e2elog.Logf("found image %s in pool %s", imageData.imageName, pool)
+	}
+
+	return stdOut, nil
+}
+
+func checkPVCImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error {
+	_, err := getPVCImageInfoInPool(f, pvc, pool)
+
+	return err
+}
+
+func checkPVCDataPoolForImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool, dataPool string) error {
+	stdOut, err := getPVCImageInfoInPool(f, pvc, pool)
+	if err != nil {
+		return err
+	}
+
+	if !strings.Contains(stdOut, "data_pool: "+dataPool) {
+		return fmt.Errorf("missing data pool value in image info, got info (%s)", stdOut)
+	}
+
+	return nil
+}
+
+func checkPVCImageJournalInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error {
+	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
+	if err != nil {
+		return err
+	}
+
+	_, stdErr, err := execCommandInToolBoxPod(f,
+		fmt.Sprintf("rados listomapkeys %s csi.volume.%s", rbdOptions(pool), imageData.imageID), rookNamespace)
+	if err != nil {
+		return err
+	}
+	if stdErr != "" {
+		return fmt.Errorf("failed to listomapkeys %v", stdErr)
+	}
+
+	if radosNamespace != "" {
+		e2elog.Logf("found image journal %s in pool %s namespace %s", "csi.volume."+imageData.imageID, pool, radosNamespace)
+	} else {
+		e2elog.Logf("found image journal %s in pool %s", "csi.volume."+imageData.imageID, pool)
+	}
+
+	return nil
+}
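
Both journal checks here (and the pool helpers above) branch on the package-level radosNamespace variable; a runnable sketch of the path-building helper from the top of this file shows the shapes involved:

	package main

	import "fmt"

	// Local copy of imageSpec for illustration only; in the e2e package
	// radosNamespace is a flag-driven package variable, not a parameter.
	func imageSpec(radosNamespace, pool, image string) string {
		if radosNamespace != "" {
			return pool + "/" + radosNamespace + "/" + image
		}
		return pool + "/" + image
	}

	func main() {
		fmt.Println(imageSpec("", "replicapool", "csi-vol-a"))    // replicapool/csi-vol-a
		fmt.Println(imageSpec("e2e", "replicapool", "csi-vol-a")) // replicapool/e2e/csi-vol-a
	}
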
fmt.Errorf("error getting fsid %v", stdErr) + } + + if radosNamespace != "" { + e2elog.Logf("found CSI journal entry %s in pool %s namespace %s", "csi.volume."+imageData.pvName, pool, radosNamespace) + } else { + e2elog.Logf("found CSI journal entry %s in pool %s", "csi.volume."+imageData.pvName, pool) + } + + return nil +} diff --git a/e2e/resize.go b/e2e/resize.go index f60bbd060..a1040d52a 100644 --- a/e2e/resize.go +++ b/e2e/resize.go @@ -161,8 +161,10 @@ func checkAppMntSize(f *framework.Framework, opt *metav1.ListOptions, size, cmd, return wait.PollImmediate(poll, timeout, func() (bool, error) { e2elog.Logf("executing cmd %s (%d seconds elapsed)", cmd, int(time.Since(start).Seconds())) - output, stdErr := execCommandInPod(f, cmd, ns, opt) - + output, stdErr, err := execCommandInPod(f, cmd, ns, opt) + if err != nil { + return false, err + } if stdErr != "" { e2elog.Logf("failed to execute command in app pod %v", stdErr) return false, nil diff --git a/e2e/snapshot.go b/e2e/snapshot.go index 672549145..8b6f0c5da 100644 --- a/e2e/snapshot.go +++ b/e2e/snapshot.go @@ -113,32 +113,46 @@ func deleteSnapshot(snap *snapapi.VolumeSnapshot, t int) error { }) } -func createRBDSnapshotClass(f *framework.Framework) { +func createRBDSnapshotClass(f *framework.Framework) error { scPath := fmt.Sprintf("%s/%s", rbdExamplePath, "snapshotclass.yaml") sc := getSnapshotClass(scPath) sc.Parameters["csi.storage.k8s.io/snapshotter-secret-namespace"] = cephCSINamespace - fsID, stdErr := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace) - Expect(stdErr).Should(BeEmpty()) + fsID, stdErr, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace) + if err != nil { + return err + } + if stdErr != "" { + return fmt.Errorf("failed to get fsid from ceph cluster %s", stdErr) + } fsID = strings.Trim(fsID, "\n") sc.Parameters["clusterID"] = fsID sclient, err := newSnapshotClient() - Expect(err).Should(BeNil()) + if err != nil { + return err + } _, err = sclient.SnapshotV1beta1().VolumeSnapshotClasses().Create(context.TODO(), &sc, metav1.CreateOptions{}) - Expect(err).Should(BeNil()) + return err } -func createCephFSSnapshotClass(f *framework.Framework) { +func createCephFSSnapshotClass(f *framework.Framework) error { scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "snapshotclass.yaml") sc := getSnapshotClass(scPath) sc.Parameters["csi.storage.k8s.io/snapshotter-secret-namespace"] = cephCSINamespace - fsID, stdErr := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace) - Expect(stdErr).Should(BeEmpty()) + fsID, stdErr, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace) + if err != nil { + return err + } + if stdErr != "" { + return fmt.Errorf("failed to get fsid from ceph cluster %s", stdErr) + } fsID = strings.Trim(fsID, "\n") sc.Parameters["clusterID"] = fsID sclient, err := newSnapshotClient() - Expect(err).Should(BeNil()) + if err != nil { + return err + } _, err = sclient.SnapshotV1beta1().VolumeSnapshotClasses().Create(context.TODO(), &sc, metav1.CreateOptions{}) - Expect(err).Should(BeNil()) + return err } diff --git a/e2e/staticpvc.go b/e2e/staticpvc.go index 6a38b7547..89f2c1e8a 100644 --- a/e2e/staticpvc.go +++ b/e2e/staticpvc.go @@ -91,7 +91,10 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) e c := f.ClientSet - fsID, e := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace) + fsID, e, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace) + if err != nil { + return err + } if e != "" { return fmt.Errorf("failed to get fsid from ceph cluster 
%s", e) } @@ -101,7 +104,10 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) e // create rbd image cmd := fmt.Sprintf("rbd create %s --size=%d --image-feature=layering %s", rbdImageName, 4096, rbdOptions(defaultRBDPool)) - _, e = execCommandInToolBoxPod(f, cmd, rookNamespace) + _, e, err = execCommandInToolBoxPod(f, cmd, rookNamespace) + if err != nil { + return err + } if e != "" { return fmt.Errorf("failed to create rbd image %s", e) } @@ -115,7 +121,7 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) e pv := getStaticPV(pvName, rbdImageName, size, "csi-rbd-secret", cephCSINamespace, sc, "rbd.csi.ceph.com", isBlock, opt) - _, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) + _, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("PV Create API error: %w", err) } @@ -155,10 +161,11 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) e } cmd = fmt.Sprintf("rbd rm %s %s", rbdImageName, rbdOptions(defaultRBDPool)) - execCommandInToolBoxPod(f, cmd, rookNamespace) - return nil + _, _, err = execCommandInToolBoxPod(f, cmd, rookNamespace) + return err } +// nolint:gocyclo // reduce complexity func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) error { opt := make(map[string]string) var ( @@ -180,7 +187,10 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) erro LabelSelector: "app=rook-ceph-tools", } - fsID, e := execCommandInPod(f, "ceph fsid", rookNamespace, &listOpt) + fsID, e, err := execCommandInPod(f, "ceph fsid", rookNamespace, &listOpt) + if err != nil { + return err + } if e != "" { return fmt.Errorf("failed to get fsid from ceph cluster %s", e) } @@ -193,21 +203,30 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) erro // create subvolumegroup, command will work even if group is already present. 
cmd := fmt.Sprintf("ceph fs subvolumegroup create %s %s", fsName, groupName) - _, e = execCommandInPod(f, cmd, rookNamespace, &listOpt) + _, e, err = execCommandInPod(f, cmd, rookNamespace, &listOpt) + if err != nil { + return err + } if e != "" { return fmt.Errorf("failed to create subvolumegroup %s", e) } // create subvolume cmd = fmt.Sprintf("ceph fs subvolume create %s %s %s --size %s", fsName, cephFsVolName, groupName, size) - _, e = execCommandInPod(f, cmd, rookNamespace, &listOpt) + _, e, err = execCommandInPod(f, cmd, rookNamespace, &listOpt) + if err != nil { + return err + } if e != "" { return fmt.Errorf("failed to create subvolume %s", e) } // get rootpath cmd = fmt.Sprintf("ceph fs subvolume getpath %s %s %s", fsName, cephFsVolName, groupName) - rootPath, e := execCommandInPod(f, cmd, rookNamespace, &listOpt) + rootPath, e, err := execCommandInPod(f, cmd, rookNamespace, &listOpt) + if err != nil { + return err + } if e != "" { return fmt.Errorf("failed to get rootpath %s", e) } @@ -215,17 +234,22 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) erro rootPath = strings.Trim(rootPath, "\n") // create secret - userID := "admin" // nolint - secret := getSecret(scPath) - adminKey, e := execCommandInPod(f, "ceph auth get-key client.admin", rookNamespace, &listOpt) + secret, err := getSecret(scPath) + if err != nil { + return err + } + adminKey, e, err := execCommandInPod(f, "ceph auth get-key client.admin", rookNamespace, &listOpt) + if err != nil { + return err + } if e != "" { return fmt.Errorf("failed to get adminKey %s", e) } - secret.StringData["userID"] = userID + secret.StringData["userID"] = adminUser secret.StringData["userKey"] = adminKey secret.Name = secretName secret.Namespace = cephCSINamespace - _, err := c.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &secret, metav1.CreateOptions{}) + _, err = c.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &secret, metav1.CreateOptions{}) if err != nil { return fmt.Errorf("failed to create secret, error %w", err) } @@ -280,14 +304,20 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) erro // delete subvolume cmd = fmt.Sprintf("ceph fs subvolume rm %s %s %s", fsName, cephFsVolName, groupName) - _, e = execCommandInPod(f, cmd, rookNamespace, &listOpt) + _, e, err = execCommandInPod(f, cmd, rookNamespace, &listOpt) + if err != nil { + return err + } if e != "" { return fmt.Errorf("failed to remove sub-volume %s", e) } // delete subvolume group cmd = fmt.Sprintf("ceph fs subvolumegroup rm %s %s", fsName, groupName) - _, e = execCommandInPod(f, cmd, rookNamespace, &listOpt) + _, e, err = execCommandInPod(f, cmd, rookNamespace, &listOpt) + if err != nil { + return err + } if e != "" { return fmt.Errorf("failed to remove subvolume group %s", e) } diff --git a/e2e/upgrade-cephfs.go b/e2e/upgrade-cephfs.go index cf6fba978..1a661e1b0 100644 --- a/e2e/upgrade-cephfs.go +++ b/e2e/upgrade-cephfs.go @@ -22,6 +22,7 @@ var _ = Describe("CephFS Upgrade Testing", func() { app *v1.Pod // cwd stores the initial working directory. 
cwd string
+		err error
 	)
 	// deploy cephfs CSI
 	BeforeEach(func() {
@@ -30,26 +31,34 @@
 		}
 		c = f.ClientSet
 		if cephCSINamespace != defaultNs {
-			err := createNamespace(c, cephCSINamespace)
+			err = createNamespace(c, cephCSINamespace)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to create namespace with error %v", err)
 			}
 		}
 		// fetch current working directory to switch back
 		// when we are done upgrading.
-		var err error
 		cwd, err = os.Getwd()
 		if err != nil {
-			Fail(err.Error())
+			e2elog.Failf("failed to getwd with error %v", err)
 		}
 		err = upgradeAndDeployCSI(upgradeVersion, "cephfs")
 		if err != nil {
-			Fail(err.Error())
+			e2elog.Failf("failed to upgrade csi with error %v", err)
+		}
+		err = createConfigMap(cephfsDirPath, f.ClientSet, f)
+		if err != nil {
+			e2elog.Failf("failed to create configmap with error %v", err)
+		}
+		err = createCephfsSecret(f.ClientSet, f)
+		if err != nil {
+			e2elog.Failf("failed to create secret with error %v", err)
+		}
+		err = createCephfsStorageClass(f.ClientSet, f, true, "")
+		if err != nil {
+			e2elog.Failf("failed to create storageclass with error %v", err)
 		}
-		createConfigMap(cephfsDirPath, f.ClientSet, f)
-		createCephfsSecret(f.ClientSet, f)
-		createCephfsStorageClass(f.ClientSet, f, true, "")
 	})
 	AfterEach(func() {
 		if !testCephFS || !upgradeTesting {
@@ -63,15 +72,24 @@
 			// log node plugin
 			logsCSIPods("app=csi-cephfsplugin", c)
 		}
-		deleteConfigMap(cephfsDirPath)
-		deleteResource(cephfsExamplePath + "secret.yaml")
-		deleteResource(cephfsExamplePath + "storageclass.yaml")
+		err = deleteConfigMap(cephfsDirPath)
+		if err != nil {
+			e2elog.Failf("failed to delete configmap with error %v", err)
+		}
+		err = deleteResource(cephfsExamplePath + "secret.yaml")
+		if err != nil {
+			e2elog.Failf("failed to delete secret with error %v", err)
+		}
+		err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+		if err != nil {
+			e2elog.Failf("failed to delete storageclass with error %v", err)
+		}
 		if deployCephFS {
 			deleteCephfsPlugin()
 			if cephCSINamespace != defaultNs {
 				err := deleteNamespace(c, cephCSINamespace)
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to delete namespace with error %v", err)
 				}
 			}
 		}
@@ -83,13 +101,13 @@
 		By("checking provisioner deployment is running")
 		err := waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
 		if err != nil {
-			Fail(err.Error())
+			e2elog.Failf("timeout waiting for deployment %s with error %v", cephfsDeploymentName, err)
 		}
-		By("checking nodeplugin deamonsets is running")
+		By("checking nodeplugin daemonset pods are running")
 		err = waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
 		if err != nil {
-			Fail(err.Error())
+			e2elog.Failf("timeout waiting for daemonset %s with error %v", cephfsDeamonSetName, err)
 		}
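
Both readiness checks above reduce to the same poll-until-ready loop (compare the original waitForDeploymentComplete removed from e2e/utils.go further down); a condensed sketch, with waitDeploymentReady as a hypothetical name:

	// Poll until the deployment reports all replicas ready, reusing the
	// package's poll interval and minute-based timeout convention.
	func waitDeploymentReady(c kubernetes.Interface, ns, name string, t int) error {
		timeout := time.Duration(t) * time.Minute
		return wait.PollImmediate(poll, timeout, func() (bool, error) {
			d, err := c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
			if err != nil {
				return false, err
			}
			return d.Status.ReadyReplicas == d.Status.Replicas, nil
		})
	}
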
 
 		By("upgrade to latest changes and verify app re-mount", func() {
@@ -100,32 +118,31 @@
 			pvc, err = loadPVC(pvcPath)
 			if pvc == nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to load pvc with error %v", err)
 			}
 			pvc.Namespace = f.UniqueName
-			e2elog.Logf("The PVC template %+v", pvc)
 
 			app, err = loadApp(appPath)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to load application with error %v", err)
 			}
 			app.Namespace = f.UniqueName
 			app.Labels = map[string]string{"app": "upgrade-testing"}
 			pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
 			err = createPVCAndApp("", f, pvc, app, deployTimeout)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to create pvc and application with error %v", err)
 			}
 
 			err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to delete application with error %v", err)
 			}
 			deleteCephfsPlugin()
 
 			// switch back to current changes.
 			err = os.Chdir(cwd)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to chdir with error %v", err)
 			}
 			deployCephfsPlugin()
@@ -134,7 +151,7 @@
 			// an earlier release.
 			err = createApp(f.ClientSet, app, deployTimeout)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to create application with error %v", err)
 			}
 		})
@@ -144,7 +161,6 @@
 			v, err = f.ClientSet.Discovery().ServerVersion()
 			if err != nil {
 				e2elog.Logf("failed to get server version with error %v", err)
-				Fail(err.Error())
 			}
 			// Resize 0.3.0 is only supported from v1.15+
 			if v.Major > "1" || (v.Major == "1" && v.Minor >= "15") {
@@ -153,23 +169,23 @@
 				}
 				pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to get pvc with error %v", err)
 				}
 				// resize PVC
 				err = expandPVCSize(f.ClientSet, pvc, pvcExpandSize, deployTimeout)
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to expand pvc with error %v", err)
 				}
 				// wait for application pod to come up after resize
 				err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout)
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("timeout waiting for pod to be in running state with error %v", err)
 				}
 				// validate if resize is successful.
 				err = checkDirSize(app, f, &opt, pvcExpandSize)
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to check directory size with error %v", err)
 				}
 			}
@@ -178,7 +194,7 @@
 			By("delete pvc and app")
 			err = deletePVCAndApp("", f, pvc, app)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to delete pvc and application with error %v", err)
 			}
 		})
 	})
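
The resize block above drives expandPVCSize (e2e/resize.go); at its core the expansion is a bump of the storage request that the CSI resizer then reconciles. A rough sketch of that step (the real helper also re-fetches the PVC and polls its status until the new size is visible):

	pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcExpandSize)
	_, err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), pvc, metav1.UpdateOptions{})
	if err != nil {
		e2elog.Failf("failed to update pvc with error %v", err)
	}
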
diff --git a/e2e/upgrade-rbd.go b/e2e/upgrade-rbd.go
index 3b9f68ef1..691b4ac43 100644
--- a/e2e/upgrade-rbd.go
+++ b/e2e/upgrade-rbd.go
@@ -32,28 +32,44 @@ var _ = Describe("RBD Upgrade Testing", func() {
 		if cephCSINamespace != defaultNs {
 			err := createNamespace(c, cephCSINamespace)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to create namespace with error %v", err)
 			}
 		}
-		createNodeLabel(f, nodeRegionLabel, regionValue)
-		createNodeLabel(f, nodeZoneLabel, zoneValue)
 		// fetch current working directory to switch back
 		// when we are done upgrading.
 		var err error
 		cwd, err = os.Getwd()
 		if err != nil {
-			Fail(err.Error())
+			e2elog.Failf("failed to getwd with error %v", err)
 		}
 		deployVault(f.ClientSet, deployTimeout)
 		err = upgradeAndDeployCSI(upgradeVersion, "rbd")
 		if err != nil {
-			Fail(err.Error())
+			e2elog.Failf("failed to upgrade and deploy CSI with error %v", err)
+		}
+		err = createConfigMap(rbdDirPath, f.ClientSet, f)
+		if err != nil {
+			e2elog.Failf("failed to create configmap with error %v", err)
+		}
+		err = createRBDStorageClass(f.ClientSet, f, nil, nil)
+		if err != nil {
+			e2elog.Failf("failed to create storageclass with error %v", err)
+		}
+		err = createRBDSecret(f.ClientSet, f)
+		if err != nil {
+			e2elog.Failf("failed to create secret with error %v", err)
+		}
+
+		err = createNodeLabel(f, nodeRegionLabel, regionValue)
+		if err != nil {
+			e2elog.Failf("failed to create node label with error %v", err)
+		}
+		err = createNodeLabel(f, nodeZoneLabel, zoneValue)
+		if err != nil {
+			e2elog.Failf("failed to create node label with error %v", err)
 		}
-		createConfigMap(rbdDirPath, f.ClientSet, f)
-		createRBDStorageClass(f.ClientSet, f, nil, nil)
-		createRBDSecret(f.ClientSet, f)
 	})
 	AfterEach(func() {
 		if !testRBD || !upgradeTesting {
@@ -68,21 +84,36 @@
 			logsCSIPods("app=csi-rbdplugin", c)
 		}
 
-		deleteConfigMap(rbdDirPath)
-		deleteResource(rbdExamplePath + "secret.yaml")
-		deleteResource(rbdExamplePath + "storageclass.yaml")
+		err := deleteConfigMap(rbdDirPath)
+		if err != nil {
+			e2elog.Failf("failed to delete configmap with error %v", err)
+		}
+		err = deleteResource(rbdExamplePath + "secret.yaml")
+		if err != nil {
+			e2elog.Failf("failed to delete secret with error %v", err)
+		}
+		err = deleteResource(rbdExamplePath + "storageclass.yaml")
+		if err != nil {
+			e2elog.Failf("failed to delete storageclass with error %v", err)
+		}
 		deleteVault()
 		if deployRBD {
 			deleteRBDPlugin()
 			if cephCSINamespace != defaultNs {
-				err := deleteNamespace(c, cephCSINamespace)
+				err = deleteNamespace(c, cephCSINamespace)
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to delete namespace with error %v", err)
 				}
 			}
 		}
-		deleteNodeLabel(c, nodeRegionLabel)
-		deleteNodeLabel(c, nodeZoneLabel)
+		err = deleteNodeLabel(c, nodeRegionLabel)
+		if err != nil {
+			e2elog.Failf("failed to delete node label with error %v", err)
+		}
+		err = deleteNodeLabel(c, nodeZoneLabel)
+		if err != nil {
+			e2elog.Failf("failed to delete node label with error %v", err)
+		}
 	})
 
 	Context("Test RBD CSI", func() {
@@ -93,14 +124,14 @@
 			By("checking provisioner deployment is running", func() {
 				err := waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("timeout waiting for deployment %s with error %v", rbdDeploymentName, err)
 				}
 			})
 
-			By("checking nodeplugin deamonsets is running", func() {
+			By("checking nodeplugin daemonset pods are running", func() {
 				err := waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("timeout waiting for daemonset %s with error %v", rbdDaemonsetName, err)
 				}
 			})
 
@@ -110,31 +141,30 @@
 				var err error
 				pvc, err = loadPVC(pvcPath)
 				if pvc == nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to load pvc with error %v", err)
 				}
 				pvc.Namespace = f.UniqueName
-				e2elog.Logf("The PVC template %+v", pvc)
 
 				app, err = loadApp(appPath)
 				if err != nil {
-					Fail(err.Error())
+					
e2elog.Failf("failed to load application with error %v", err) } app.Namespace = f.UniqueName app.Labels = map[string]string{"app": "upgrade-testing"} pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize) err = createPVCAndApp("", f, pvc, app, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create pvc with error %v", err) } err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete application with error %v", err) } deleteRBDPlugin() err = os.Chdir(cwd) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to change directory with error %v", err) } deployRBDPlugin() @@ -143,7 +173,7 @@ var _ = Describe("RBD Upgrade Testing", func() { app.Labels = map[string]string{"app": "upgrade-testing"} err = createApp(f.ClientSet, app, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to create application with error %v", err) } }) @@ -153,7 +183,6 @@ var _ = Describe("RBD Upgrade Testing", func() { v, err := f.ClientSet.Discovery().ServerVersion() if err != nil { e2elog.Logf("failed to get server version with error %v", err) - Fail(err.Error()) } // Resize 0.3.0 is only supported from v1.15+ if v.Major > "1" || (v.Major == "1" && v.Minor >= "15") { @@ -162,23 +191,23 @@ var _ = Describe("RBD Upgrade Testing", func() { } pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{}) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to get pvc with error %v", err) } // resize PVC err = expandPVCSize(f.ClientSet, pvc, pvcExpandSize, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to expand pvc with error %v", err) } // wait for application pod to come up after resize err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout) if err != nil { - Fail(err.Error()) + e2elog.Failf("timeout waiting for pod to be in running state with error %v", err) } // validate if resize is successful. err = checkDirSize(app, f, &opt, pvcExpandSize) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to check directory size with error %v", err) } } @@ -187,7 +216,7 @@ var _ = Describe("RBD Upgrade Testing", func() { By("delete pvc and app", func() { err := deletePVCAndApp("", f, pvc, app) if err != nil { - Fail(err.Error()) + e2elog.Failf("failed to delete pvc and application with error %v", err) } }) }) diff --git a/e2e/utils.go b/e2e/utils.go index 3637ef8e0..84227e326 100644 --- a/e2e/utils.go +++ b/e2e/utils.go @@ -7,28 +7,17 @@ import ( "errors" "fmt" "io/ioutil" - "regexp" "strings" "time" - "github.com/ceph/ceph-csi/internal/util" - - . "github.com/onsi/ginkgo" // nolint - . 
"github.com/onsi/gomega" // nolint - apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" scv1 "k8s.io/api/storage/v1" - apierrs "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/util/wait" utilyaml "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/client-go/kubernetes" - "k8s.io/kubernetes/pkg/client/conditions" "k8s.io/kubernetes/test/e2e/framework" e2elog "k8s.io/kubernetes/test/e2e/framework/log" - e2epv "k8s.io/kubernetes/test/e2e/framework/pv" - testutils "k8s.io/kubernetes/test/utils" ) /* #nosec:G101, values not credententials, just a reference to the location.*/ @@ -70,376 +59,40 @@ func initResouces() { vaultAddr = fmt.Sprintf("http://vault.%s.svc.cluster.local:8200", cephCSINamespace) } -func createNamespace(c kubernetes.Interface, name string) error { - timeout := time.Duration(deployTimeout) * time.Minute - ns := &v1.Namespace{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - } - _, err := c.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{}) - if err != nil && !apierrs.IsAlreadyExists(err) { - return err - } - - return wait.PollImmediate(poll, timeout, func() (bool, error) { - _, err := c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - e2elog.Logf("Error getting namespace: '%s': %v", name, err) - if apierrs.IsNotFound(err) { - return false, nil - } - if testutils.IsRetryableAPIError(err) { - return false, nil - } - return false, err - } - return true, nil - }) -} - -func deleteNamespace(c kubernetes.Interface, name string) error { - timeout := time.Duration(deployTimeout) * time.Minute - err := c.CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil && !apierrs.IsNotFound(err) { - Fail(err.Error()) - } - return wait.PollImmediate(poll, timeout, func() (bool, error) { - _, err = c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - if apierrs.IsNotFound(err) { - return true, nil - } - e2elog.Logf("Error getting namespace: '%s': %v", name, err) - if testutils.IsRetryableAPIError(err) { - return false, nil - } - return false, err - } - return false, nil - }) -} - -func replaceNamespaceInTemplate(filePath string) (string, error) { - read, err := ioutil.ReadFile(filePath) - if err != nil { - return "", err - } - return strings.ReplaceAll(string(read), "namespace: default", fmt.Sprintf("namespace: %s", cephCSINamespace)), nil -} - -func waitForDaemonSets(name, ns string, c kubernetes.Interface, t int) error { - timeout := time.Duration(t) * time.Minute - start := time.Now() - e2elog.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", timeout, ns) - - return wait.PollImmediate(poll, timeout, func() (bool, error) { - ds, err := c.AppsV1().DaemonSets(ns).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - e2elog.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err) - if strings.Contains(err.Error(), "not found") { - return false, nil - } - if testutils.IsRetryableAPIError(err) { - return false, nil - } - return false, err - } - dNum := ds.Status.DesiredNumberScheduled - ready := ds.Status.NumberReady - e2elog.Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ready, dNum, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds())) - if ready != dNum { - return false, nil - } - - return true, nil - }) -} - -// Waits for the deployment to complete. 
- -func waitForDeploymentComplete(name, ns string, c kubernetes.Interface, t int) error { - var ( - deployment *apps.Deployment - reason string - err error - ) - timeout := time.Duration(t) * time.Minute - err = wait.PollImmediate(poll, timeout, func() (bool, error) { - deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return false, err - } - - // TODO need to check rolling update - - // When the deployment status and its underlying resources reach the - // desired state, we're done - if deployment.Status.Replicas == deployment.Status.ReadyReplicas { - return true, nil - } - e2elog.Logf("deployment status: expected replica count %d running replica count %d", deployment.Status.Replicas, deployment.Status.ReadyReplicas) - reason = fmt.Sprintf("deployment status: %#v", deployment.Status.String()) - return false, nil - }) - - if errors.Is(err, wait.ErrWaitTimeout) { - err = fmt.Errorf("%s", reason) - } - if err != nil { - return fmt.Errorf("error waiting for deployment %q status to match expectation: %w", name, err) - } - return nil -} - -func getCommandInPodOpts(f *framework.Framework, c, ns string, opt *metav1.ListOptions) framework.ExecOptions { - cmd := []string{"/bin/sh", "-c", c} - podList, err := f.PodClientNS(ns).List(context.TODO(), *opt) - framework.ExpectNoError(err) - Expect(podList.Items).NotTo(BeNil()) - Expect(err).Should(BeNil()) - - return framework.ExecOptions{ - Command: cmd, - PodName: podList.Items[0].Name, - Namespace: ns, - ContainerName: podList.Items[0].Spec.Containers[0].Name, - Stdin: nil, - CaptureStdout: true, - CaptureStderr: true, - PreserveWhitespace: true, - } -} - -func execCommandInPod(f *framework.Framework, c, ns string, opt *metav1.ListOptions) (string, string) { - podPot := getCommandInPodOpts(f, c, ns, opt) - stdOut, stdErr, err := f.ExecWithOptions(podPot) - if stdErr != "" { - e2elog.Logf("stdErr occurred: %v", stdErr) - } - Expect(err).Should(BeNil()) - return stdOut, stdErr -} - -func execCommandInToolBoxPod(f *framework.Framework, c, ns string) (string, string) { - opt := &metav1.ListOptions{ - LabelSelector: rookTolBoxPodLabel, - } - podPot := getCommandInPodOpts(f, c, ns, opt) - stdOut, stdErr, err := f.ExecWithOptions(podPot) - if stdErr != "" { - e2elog.Logf("stdErr occurred: %v", stdErr) - } - Expect(err).Should(BeNil()) - return stdOut, stdErr -} - -func execCommandInPodAndAllowFail(f *framework.Framework, c, ns string, opt *metav1.ListOptions) (string, string) { - podPot := getCommandInPodOpts(f, c, ns, opt) - stdOut, stdErr, err := f.ExecWithOptions(podPot) - if err != nil { - e2elog.Logf("command %s failed: %v", c, err) - } - return stdOut, stdErr -} - -func getMons(ns string, c kubernetes.Interface) []string { +func getMons(ns string, c kubernetes.Interface) ([]string, error) { opt := metav1.ListOptions{ LabelSelector: "app=rook-ceph-mon", } - svcList, err := c.CoreV1().Services(ns).List(context.TODO(), opt) - Expect(err).Should(BeNil()) services := make([]string, 0) + + svcList, err := c.CoreV1().Services(ns).List(context.TODO(), opt) + if err != nil { + return services, err + } for i := range svcList.Items { s := fmt.Sprintf("%s.%s.svc.cluster.local:%d", svcList.Items[i].Name, svcList.Items[i].Namespace, svcList.Items[i].Spec.Ports[0].Port) services = append(services, s) } - return services + return services, nil } -func getStorageClass(path string) scv1.StorageClass { +func getStorageClass(path string) (scv1.StorageClass, error) { sc := scv1.StorageClass{} err := 
unmarshal(path, &sc) - Expect(err).Should(BeNil()) - return sc + return sc, err } -func createCephfsStorageClass(c kubernetes.Interface, f *framework.Framework, enablePool bool, clusterID string) { - scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "storageclass.yaml") - sc := getStorageClass(scPath) - sc.Parameters["fsName"] = "myfs" - sc.Parameters["csi.storage.k8s.io/provisioner-secret-namespace"] = rookNamespace - sc.Parameters["csi.storage.k8s.io/provisioner-secret-name"] = cephfsProvisionerSecretName - - sc.Parameters["csi.storage.k8s.io/controller-expand-secret-namespace"] = rookNamespace - sc.Parameters["csi.storage.k8s.io/controller-expand-secret-name"] = cephfsProvisionerSecretName - - sc.Parameters["csi.storage.k8s.io/node-stage-secret-namespace"] = rookNamespace - sc.Parameters["csi.storage.k8s.io/node-stage-secret-name"] = cephfsNodePluginSecretName - - if enablePool { - sc.Parameters["pool"] = "myfs-data0" - } - fsID, stdErr := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace) - Expect(stdErr).Should(BeEmpty()) - // remove new line present in fsID - fsID = strings.Trim(fsID, "\n") - if clusterID != "" { - fsID = clusterID - } - sc.Namespace = cephCSINamespace - sc.Parameters["clusterID"] = fsID - _, err := c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{}) - Expect(err).Should(BeNil()) -} - -func createRBDStorageClass(c kubernetes.Interface, f *framework.Framework, scOptions, parameters map[string]string) { - scPath := fmt.Sprintf("%s/%s", rbdExamplePath, "storageclass.yaml") - sc := getStorageClass(scPath) - sc.Parameters["pool"] = defaultRBDPool - sc.Parameters["csi.storage.k8s.io/provisioner-secret-namespace"] = rookNamespace - sc.Parameters["csi.storage.k8s.io/provisioner-secret-name"] = rbdProvisionerSecretName - - sc.Parameters["csi.storage.k8s.io/controller-expand-secret-namespace"] = rookNamespace - sc.Parameters["csi.storage.k8s.io/controller-expand-secret-name"] = rbdProvisionerSecretName - - sc.Parameters["csi.storage.k8s.io/node-stage-secret-namespace"] = rookNamespace - sc.Parameters["csi.storage.k8s.io/node-stage-secret-name"] = rbdNodePluginSecretName - - fsID, stdErr := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace) - Expect(stdErr).Should(BeEmpty()) - // remove new line present in fsID - fsID = strings.Trim(fsID, "\n") - - sc.Parameters["clusterID"] = fsID - for k, v := range parameters { - sc.Parameters[k] = v - } - sc.Namespace = cephCSINamespace - - if scOptions["volumeBindingMode"] == "WaitForFirstConsumer" { - value := scv1.VolumeBindingWaitForFirstConsumer - sc.VolumeBindingMode = &value - } - - // comma separated mount options - if opt, ok := scOptions[rbdmountOptions]; ok { - mOpt := strings.Split(opt, ",") - sc.MountOptions = append(sc.MountOptions, mOpt...) 
- } - _, err := c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{}) - Expect(err).Should(BeNil()) -} - -func createRadosNamespace(f *framework.Framework) { - stdOut, stdErr := execCommandInToolBoxPod(f, - fmt.Sprintf("rbd namespace ls --pool=%s", defaultRBDPool), rookNamespace) - Expect(stdErr).Should(BeEmpty()) - if !strings.Contains(stdOut, radosNamespace) { - _, stdErr = execCommandInToolBoxPod(f, - fmt.Sprintf("rbd namespace create %s", rbdOptions(defaultRBDPool)), rookNamespace) - Expect(stdErr).Should(BeEmpty()) - } - stdOut, stdErr = execCommandInToolBoxPod(f, - fmt.Sprintf("rbd namespace ls --pool=%s", rbdTopologyPool), rookNamespace) - Expect(stdErr).Should(BeEmpty()) - if !strings.Contains(stdOut, radosNamespace) { - _, stdErr = execCommandInToolBoxPod(f, - fmt.Sprintf("rbd namespace create %s", rbdOptions(rbdTopologyPool)), rookNamespace) - Expect(stdErr).Should(BeEmpty()) - } -} - -func deleteConfigMap(pluginPath string) { - path := pluginPath + configMap - _, err := framework.RunKubectl(cephCSINamespace, "delete", "-f", path, ns) - if err != nil { - e2elog.Logf("failed to delete configmap %v", err) - } -} - -func createConfigMap(pluginPath string, c kubernetes.Interface, f *framework.Framework) { - path := pluginPath + configMap - cm := v1.ConfigMap{} - err := unmarshal(path, &cm) - Expect(err).Should(BeNil()) - - fsID, stdErr := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace) - Expect(stdErr).Should(BeEmpty()) - // remove new line present in fsID - fsID = strings.Trim(fsID, "\n") - // get mon list - mons := getMons(rookNamespace, c) - conmap := []util.ClusterInfo{{ - ClusterID: fsID, - Monitors: mons, - RadosNamespace: radosNamespace, - }} - if upgradeTesting { - subvolumegroup = "csi" - } - conmap[0].CephFS.SubvolumeGroup = subvolumegroup - data, err := json.Marshal(conmap) - Expect(err).Should(BeNil()) - cm.Data["config.json"] = string(data) - cm.Namespace = cephCSINamespace - // if the configmap is present update it,during cephcsi helm charts - // deployment empty configmap gets created we need to override it - _, err = c.CoreV1().ConfigMaps(cephCSINamespace).Get(context.TODO(), cm.Name, metav1.GetOptions{}) - - if err == nil { - _, updateErr := c.CoreV1().ConfigMaps(cephCSINamespace).Update(context.TODO(), &cm, metav1.UpdateOptions{}) - Expect(updateErr).Should(BeNil()) - } - if apierrs.IsNotFound(err) { - _, err = c.CoreV1().ConfigMaps(cephCSINamespace).Create(context.TODO(), &cm, metav1.CreateOptions{}) - } - - Expect(err).Should(BeNil()) -} - -func getSecret(path string) v1.Secret { +func getSecret(path string) (v1.Secret, error) { sc := v1.Secret{} err := unmarshal(path, &sc) // discard corruptInputError if err != nil { var b64cie base64.CorruptInputError if !errors.As(err, &b64cie) { - Expect(err).Should(BeNil()) + return sc, err } } - return sc -} - -func createCephfsSecret(c kubernetes.Interface, f *framework.Framework) { - scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "secret.yaml") - sc := getSecret(scPath) - - adminKey, stdErr := execCommandInToolBoxPod(f, "ceph auth get-key client.admin", rookNamespace) - Expect(stdErr).Should(BeEmpty()) - sc.StringData["adminID"] = "admin" - sc.StringData["adminKey"] = adminKey - delete(sc.StringData, "userID") - delete(sc.StringData, "userKey") - sc.Namespace = cephCSINamespace - _, err := c.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &sc, metav1.CreateOptions{}) - Expect(err).Should(BeNil()) -} - -func createRBDSecret(c kubernetes.Interface, f *framework.Framework) { - 
scPath := fmt.Sprintf("%s/%s", rbdExamplePath, "secret.yaml") - sc := getSecret(scPath) - - adminKey, stdErr := execCommandInToolBoxPod(f, "ceph auth get-key client.admin", rookNamespace) - Expect(stdErr).Should(BeEmpty()) - sc.StringData["userID"] = "admin" - sc.StringData["userKey"] = adminKey - sc.Namespace = cephCSINamespace - _, err := c.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &sc, metav1.CreateOptions{}) - Expect(err).Should(BeNil()) - - err = updateSecretForEncryption(c) - Expect(err).Should(BeNil()) + return sc, nil } // updateSecretForEncryption is an hack to update the secrets created by rook to @@ -472,7 +125,7 @@ func updateSecretForEncryption(c kubernetes.Interface) error { return nil } -func deleteResource(scPath string) { +func deleteResource(scPath string) error { data, err := replaceNamespaceInTemplate(scPath) if err != nil { e2elog.Logf("failed to read content from %s %v", scPath, err) @@ -481,167 +134,7 @@ func deleteResource(scPath string) { if err != nil { e2elog.Logf("failed to delete %s %v", scPath, err) } - Expect(err).Should(BeNil()) -} - -func loadPVC(path string) (*v1.PersistentVolumeClaim, error) { - pvc := &v1.PersistentVolumeClaim{} - err := unmarshal(path, &pvc) - if err != nil { - return nil, err - } - return pvc, err -} - -func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, t int) error { - timeout := time.Duration(t) * time.Minute - pv := &v1.PersistentVolume{} - var err error - _, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{}) - Expect(err).Should(BeNil()) - if timeout == 0 { - return nil - } - name := pvc.Name - start := time.Now() - e2elog.Logf("Waiting up to %v to be in Bound state", pvc) - - return wait.PollImmediate(poll, timeout, func() (bool, error) { - e2elog.Logf("waiting for PVC %s (%d seconds elapsed)", pvc.Name, int(time.Since(start).Seconds())) - pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - e2elog.Logf("Error getting pvc in namespace: '%s': %v", pvc.Namespace, err) - if testutils.IsRetryableAPIError(err) { - return false, nil - } - if apierrs.IsNotFound(err) { - return false, nil - } - return false, err - } - - if pvc.Spec.VolumeName == "" { - return false, nil - } - - pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) - if err != nil { - return false, err - } - if apierrs.IsNotFound(err) { - return false, nil - } - err = e2epv.WaitOnPVandPVC(c, pvc.Namespace, pv, pvc) - if err != nil { - return false, nil - } - return true, nil - }) -} - -func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, t int) error { - timeout := time.Duration(t) * time.Minute - nameSpace := pvc.Namespace - name := pvc.Name - var err error - e2elog.Logf("Deleting PersistentVolumeClaim %v on namespace %v", name, nameSpace) - - pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return err - } - pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) - if err != nil { - return err - } - - err = c.CoreV1().PersistentVolumeClaims(nameSpace).Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil { - return fmt.Errorf("delete of PVC %v failed: %w", name, err) - } - start := time.Now() - return wait.PollImmediate(poll, timeout, func() (bool, error) { - // Check that the PVC is 
really deleted. - e2elog.Logf("waiting for PVC %s in state %s to be deleted (%d seconds elapsed)", name, pvc.Status.String(), int(time.Since(start).Seconds())) - pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(context.TODO(), name, metav1.GetOptions{}) - if err == nil { - return false, nil - } - if !apierrs.IsNotFound(err) { - return false, fmt.Errorf("get on deleted PVC %v failed with error other than \"not found\": %w", name, err) - } - - // Examine the pv.ClaimRef and UID. Expect nil values. - _, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{}) - if err == nil { - return false, nil - } - - if !apierrs.IsNotFound(err) { - return false, fmt.Errorf("delete PV %v failed with error other than \"not found\": %w", pv.Name, err) - } - - return true, nil - }) -} - -func loadApp(path string) (*v1.Pod, error) { - app := v1.Pod{} - err := unmarshal(path, &app) - if err != nil { - return nil, err - } - return &app, nil -} - -func createApp(c kubernetes.Interface, app *v1.Pod, timeout int) error { - _, err := c.CoreV1().Pods(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{}) - if err != nil { - return err - } - return waitForPodInRunningState(app.Name, app.Namespace, c, timeout) -} - -func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int) error { - timeout := time.Duration(t) * time.Minute - start := time.Now() - e2elog.Logf("Waiting up to %v to be in Running state", name) - return wait.PollImmediate(poll, timeout, func() (bool, error) { - pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return false, err - } - switch pod.Status.Phase { - case v1.PodRunning: - return true, nil - case v1.PodFailed, v1.PodSucceeded: - return false, conditions.ErrPodCompleted - } - e2elog.Logf("%s app is in %s phase expected to be in Running state (%d seconds elapsed)", name, pod.Status.Phase, int(time.Since(start).Seconds())) - return false, nil - }) -} - -func deletePod(name, ns string, c kubernetes.Interface, t int) error { - timeout := time.Duration(t) * time.Minute - err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{}) - if err != nil { - return err - } - start := time.Now() - e2elog.Logf("Waiting for pod %v to be deleted", name) - return wait.PollImmediate(poll, timeout, func() (bool, error) { - _, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{}) - - if apierrs.IsNotFound(err) { - return true, nil - } - e2elog.Logf("%s app to be deleted (%d seconds elapsed)", name, int(time.Since(start).Seconds())) - if err != nil { - return false, err - } - return false, nil - }) + return err } func unmarshal(fileName string, obj interface{}) error { @@ -691,78 +184,34 @@ func deletePVCAndApp(name string, f *framework.Framework, pvc *v1.PersistentVolu return err } -func createPVCAndAppBinding(pvcPath, appPath string, f *framework.Framework, pvcTimeout int) (*v1.PersistentVolumeClaim, *v1.Pod) { +func createPVCAndAppBinding(pvcPath, appPath string, f *framework.Framework, pvcTimeout int) (*v1.PersistentVolumeClaim, *v1.Pod, error) { pvc, err := loadPVC(pvcPath) if pvc == nil { - Fail(err.Error()) + return nil, nil, err } pvc.Namespace = f.UniqueName - e2elog.Logf("The PVC template %+v", pvc) app, err := loadApp(appPath) if err != nil { - Fail(err.Error()) + return nil, nil, err } app.Namespace = f.UniqueName err = createPVCAndApp("", f, pvc, app, pvcTimeout) if err != nil { - Fail(err.Error()) + return nil, nil, err } - return pvc, app + 
return pvc, app, nil } -func validatePVCAndAppBinding(pvcPath, appPath string, f *framework.Framework) { - pvc, app := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout) - err := deletePVCAndApp("", f, pvc, app) +func validatePVCAndAppBinding(pvcPath, appPath string, f *framework.Framework) error { + pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout) if err != nil { - Fail(err.Error()) + return err } -} - -type imageInfoFromPVC struct { - imageID string - imageName string - csiVolumeHandle string - pvName string -} - -// getImageInfoFromPVC reads volume handle of the bound PV to the passed in PVC, -// and returns imageInfoFromPVC or error. -func getImageInfoFromPVC(pvcNamespace, pvcName string, f *framework.Framework) (imageInfoFromPVC, error) { - var imageData imageInfoFromPVC - - c := f.ClientSet.CoreV1() - pvc, err := c.PersistentVolumeClaims(pvcNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{}) - if err != nil { - return imageData, err - } - - pv, err := c.PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{}) - if err != nil { - return imageData, err - } - - imageIDRegex := regexp.MustCompile(`(\w+\-?){5}$`) - imageID := imageIDRegex.FindString(pv.Spec.CSI.VolumeHandle) - - imageData = imageInfoFromPVC{ - imageID: imageID, - imageName: fmt.Sprintf("csi-vol-%s", imageID), - csiVolumeHandle: pv.Spec.CSI.VolumeHandle, - pvName: pv.Name, - } - return imageData, nil -} - -func getImageMeta(rbdImageSpec, metaKey string, f *framework.Framework) (string, error) { - cmd := fmt.Sprintf("rbd image-meta get %s %s", rbdImageSpec, metaKey) - stdOut, stdErr := execCommandInToolBoxPod(f, cmd, rookNamespace) - if stdErr != "" { - return strings.TrimSpace(stdOut), fmt.Errorf(stdErr) - } - return strings.TrimSpace(stdOut), nil + err = deletePVCAndApp("", f, pvc, app) + return err } func getMountType(appName, appNamespace, mountPath string, f *framework.Framework) (string, error) { @@ -770,7 +219,10 @@ func getMountType(appName, appNamespace, mountPath string, f *framework.Framewor FieldSelector: fields.OneTermEqualSelector("metadata.name", appName).String(), } cmd := fmt.Sprintf("lsblk -o TYPE,MOUNTPOINT | grep '%s' | awk '{print $1}'", mountPath) - stdOut, stdErr := execCommandInPod(f, cmd, appNamespace, &opt) + stdOut, stdErr, err := execCommandInPod(f, cmd, appNamespace, &opt) + if err != nil { + return "", err + } if stdErr != "" { return strings.TrimSpace(stdOut), fmt.Errorf(stdErr) } @@ -794,68 +246,16 @@ func readVaultSecret(key string, f *framework.Framework) (string, string) { return strings.TrimSpace(stdOut), strings.TrimSpace(stdErr) } -func validateEncryptedPVCAndAppBinding(pvcPath, appPath, kms string, f *framework.Framework) { - pvc, app := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout) - - imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f) - if err != nil { - Fail(err.Error()) - } - rbdImageSpec := imageSpec(defaultRBDPool, imageData.imageName) - encryptedState, err := getImageMeta(rbdImageSpec, ".rbd.csi.ceph.com/encrypted", f) - if err != nil { - Fail(err.Error()) - } - Expect(encryptedState).To(Equal("encrypted")) - - volumeMountPath := app.Spec.Containers[0].VolumeMounts[0].MountPath - mountType, err := getMountType(app.Name, app.Namespace, volumeMountPath, f) - if err != nil { - Fail(err.Error()) - } - Expect(mountType).To(Equal("crypt")) - - if kms == "vault" { - // check new passphrase created - _, stdErr := readVaultSecret(imageData.csiVolumeHandle, f) - if stdErr != "" { - 
Fail(fmt.Sprintf("failed to read passphrase from vault: %s", stdErr))
-		}
-	}
-
-	err = deletePVCAndApp("", f, pvc, app)
-	if err != nil {
-		Fail(err.Error())
-	}
-
-	if kms == "vault" {
-		// check new passphrase created
-		stdOut, _ := readVaultSecret(imageData.csiVolumeHandle, f)
-		if stdOut != "" {
-			Fail(fmt.Sprintf("passphrase found in vault while should be deleted: %s", stdOut))
-		}
-	}
-}
-
-func deletePodWithLabel(label, ns string, skipNotFound bool) error {
-	_, err := framework.RunKubectl(cephCSINamespace, "delete", "po", "-l", label, fmt.Sprintf("--ignore-not-found=%t", skipNotFound), fmt.Sprintf("--namespace=%s", ns))
-	if err != nil {
-		e2elog.Logf("failed to delete pod %v", err)
-	}
-	return err
-}
-
-func validateNormalUserPVCAccess(pvcPath string, f *framework.Framework) {
+func validateNormalUserPVCAccess(pvcPath string, f *framework.Framework) error {
 	pvc, err := loadPVC(pvcPath)
 	if err != nil {
-		Fail(err.Error())
+		return err
 	}
 	pvc.Namespace = f.UniqueName
 	pvc.Name = f.UniqueName
-	e2elog.Logf("The PVC template %+v", pvc)
 	err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
 	if err != nil {
-		Fail(err.Error())
+		return err
 	}
 	var user int64 = 2000
 	app := &v1.Pod{
@@ -902,51 +302,26 @@
 
 	err = createApp(f.ClientSet, app, deployTimeout)
 	if err != nil {
-		Fail(err.Error())
+		return err
 	}
 
 	opt := metav1.ListOptions{
 		LabelSelector: "app=pod-run-as-non-root",
 	}
-	execCommandInPod(f, "echo testing > /target/testing", app.Namespace, &opt)
-
+	_, stdErr, err := execCommandInPod(f, "echo testing > /target/testing", app.Namespace, &opt)
+	if err != nil {
+		return err
+	}
+	if stdErr != "" {
+		return fmt.Errorf("failed to touch a file as non-root user %v", stdErr)
+	}
 	err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
-	if err != nil {
-		Fail(err.Error())
-	}
-
-	err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
-	if err != nil {
-		Fail(err.Error())
-	}
-}
-
-func deleteBackingCephFSVolume(f *framework.Framework, pvc *v1.PersistentVolumeClaim) error {
-	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
 	if err != nil {
 		return err
 	}
-	_, stdErr := execCommandInToolBoxPod(f, "ceph fs subvolume rm myfs "+imageData.imageName+" "+subvolumegroup, rookNamespace)
-	Expect(stdErr).Should(BeEmpty())
-
-	if stdErr != "" {
-		return fmt.Errorf("error deleting backing volume %s", imageData.imageName)
-	}
-	return nil
-}
-
-func listRBDImages(f *framework.Framework) []string {
-	stdout, stdErr := execCommandInToolBoxPod(f,
-		fmt.Sprintf("rbd ls --format=json %s", rbdOptions(defaultRBDPool)), rookNamespace)
-	Expect(stdErr).Should(BeEmpty())
-	var imgInfos []string
-
-	err := json.Unmarshal([]byte(stdout), &imgInfos)
-	if err != nil {
-		Fail(err.Error())
-	}
-	return imgInfos
+	err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
+	return err
 }
 
 // writeDataInPod fill zero content to a file in the provided POD volume.
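
The pod body elided by the hunk above is an inline spec; only the UID 2000 and the pod-run-as-non-root label are visible in this patch, so the remaining fields in this sketch are assumptions about its shape:

	var user int64 = 2000
	app := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pod-run-as-non-root",
			Namespace: f.UniqueName,
			Labels:    map[string]string{"app": "pod-run-as-non-root"},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:            "write-pod",
				Image:           "busybox", // image is an assumption
				Command:         []string{"sleep", "3600"},
				SecurityContext: &v1.SecurityContext{RunAsUser: &user},
				VolumeMounts:    []v1.VolumeMount{{Name: "target", MountPath: "/target"}},
			}},
			Volumes: []v1.Volume{{
				Name: "target",
				VolumeSource: v1.VolumeSource{
					PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: pvc.Name},
				},
			}},
		},
	}
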
@@ -965,25 +340,15 @@ func writeDataInPod(app *v1.Pod, f *framework.Framework) error {
 	// instead of filling and reverifying the md5sum/data integrity
 	filePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
 	// While writing more data we are encountering issues in E2E timeout, so keeping it low for now
-	_, writeErr := execCommandInPod(f, fmt.Sprintf("dd if=/dev/zero of=%s bs=1M count=10 status=none", filePath), app.Namespace, &opt)
-	Expect(writeErr).Should(BeEmpty())
-	return nil
-}
-
-type cephfsSubVolume struct {
-	Name string `json:"name"`
-}
-
-func listCephFSSubVolumes(f *framework.Framework, filesystem, groupname string) []cephfsSubVolume {
-	stdout, stdErr := execCommandInToolBoxPod(f, fmt.Sprintf("ceph fs subvolume ls %s --group_name=%s --format=json", filesystem, groupname), rookNamespace)
-	Expect(stdErr).Should(BeEmpty())
-	var subVols []cephfsSubVolume
-
-	err := json.Unmarshal([]byte(stdout), &subVols)
+	_, writeErr, err := execCommandInPod(f, fmt.Sprintf("dd if=/dev/zero of=%s bs=1M count=10 status=none", filePath), app.Namespace, &opt)
 	if err != nil {
-		Fail(err.Error())
+		return err
 	}
-	return subVols
+	if writeErr != "" {
+		err = fmt.Errorf("failed to write data with error %v", writeErr)
+	}
+
+	return err
 }
 
 func checkDataPersist(pvcPath, appPath string, f *framework.Framework) error {
@@ -1013,8 +378,13 @@ func checkDataPersist(pvcPath, appPath string, f *framework.Framework) error {
 	// write data to PVC
 	filePath := app.Spec.Containers[0].VolumeMounts[0].MountPath + "/test"
-	execCommandInPod(f, fmt.Sprintf("echo %s > %s", data, filePath), app.Namespace, &opt)
-
+	_, stdErr, err := execCommandInPod(f, fmt.Sprintf("echo %s > %s", data, filePath), app.Namespace, &opt)
+	if err != nil {
+		return err
+	}
+	if stdErr != "" {
+		return fmt.Errorf("failed to write data to a file with error %v", stdErr)
+	}
 	// delete app
 	err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
 	if err != nil {
@@ -1025,8 +395,13 @@ func checkDataPersist(pvcPath, appPath string, f *framework.Framework) error {
 	if err != nil {
 		return err
 	}
-	persistData, stdErr := execCommandInPod(f, fmt.Sprintf("cat %s", filePath), app.Namespace, &opt)
-	Expect(stdErr).Should(BeEmpty())
+	persistData, stdErr, err := execCommandInPod(f, fmt.Sprintf("cat %s", filePath), app.Namespace, &opt)
+	if err != nil {
+		return err
+	}
+	if stdErr != "" {
+		return fmt.Errorf("failed to get file content with error %v", stdErr)
+	}
 	if !strings.Contains(persistData, data) {
 		return fmt.Errorf("data not persistent expected data %s received data %s ", data, persistData)
 	}
@@ -1035,42 +410,6 @@ func checkDataPersist(pvcPath, appPath string, f *framework.Framework) error {
 	return err
 }
 
-func deleteBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeClaim) error {
-	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
-	if err != nil {
-		return err
-	}
-
-	cmd := fmt.Sprintf("rbd rm %s %s", rbdOptions(defaultRBDPool), imageData.imageName)
-	execCommandInToolBoxPod(f, cmd, rookNamespace)
-	return nil
-}
-
-func deletePool(name string, cephfs bool, f *framework.Framework) {
-	var cmds = []string{}
-	if cephfs {
-		// ceph fs fail
-		// ceph fs rm myfs --yes-i-really-mean-it
-		// ceph osd pool delete myfs-metadata myfs-metadata
-		// --yes-i-really-mean-it
-		// ceph osd pool delete myfs-data0 myfs-data0
-		// --yes-i-really-mean-it
-		cmds = append(cmds, fmt.Sprintf("ceph fs fail %s", name),
-			fmt.Sprintf("ceph fs rm %s --yes-i-really-mean-it", name),
-			fmt.Sprintf("ceph osd pool delete %s-metadata %s-metadata --yes-i-really-really-mean-it", name, name),
-			fmt.Sprintf("ceph osd pool delete %s-data0 %s-data0 --yes-i-really-really-mean-it", name, name))
-	} else {
-		// ceph osd pool delete replicapool replicapool
-		// --yes-i-really-mean-it
-		cmds = append(cmds, fmt.Sprintf("ceph osd pool delete %s %s --yes-i-really-really-mean-it", name, name))
-	}
-
-	for _, cmd := range cmds {
-		// discard stdErr as some commands prints warning in strErr
-		execCommandInToolBoxPod(f, cmd, rookNamespace)
-	}
-}
-
 func pvcDeleteWhenPoolNotFound(pvcPath string, cephfs bool, f *framework.Framework) error {
 	pvc, err := loadPVC(pvcPath)
 	if err != nil {
@@ -1088,14 +427,20 @@ func pvcDeleteWhenPoolNotFound(pvcPath string, cephfs bool, f *framework.Framewo
 		return err
 	}
 	// delete cephfs filesystem
-	deletePool("myfs", cephfs, f)
+	err = deletePool("myfs", cephfs, f)
+	if err != nil {
+		return err
+	}
 	} else {
 		err = deleteBackingRBDImage(f, pvc)
 		if err != nil {
 			return err
 		}
 		// delete rbd pool
-	deletePool(defaultRBDPool, cephfs, f)
+	err = deletePool(defaultRBDPool, cephfs, f)
+	if err != nil {
+		return err
+	}
 	}
 	err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
 	return err
@@ -1126,8 +471,13 @@ func checkMountOptions(pvcPath, appPath string, f *framework.Framework, mountFla
 	}
 
 	cmd := fmt.Sprintf("mount |grep %s", app.Spec.Containers[0].VolumeMounts[0].MountPath)
-	data, stdErr := execCommandInPod(f, cmd, app.Namespace, &opt)
-	Expect(stdErr).Should(BeEmpty())
+	data, stdErr, err := execCommandInPod(f, cmd, app.Namespace, &opt)
+	if err != nil {
+		return err
+	}
+	if stdErr != "" {
+		return fmt.Errorf("failed to get mount point with error %v", stdErr)
+	}
 	for _, f := range mountFlags {
 		if !strings.Contains(data, f) {
 			return fmt.Errorf("mount option %s not found in %s", f, data)
@@ -1138,223 +488,7 @@ func checkMountOptions(pvcPath, appPath string, f *framework.Framework, mountFla
 	return err
 }
 
-func createNodeLabel(f *framework.Framework, labelKey, labelValue string) {
-	// NOTE: This makes all nodes (in a multi-node setup) in the test take
-	// the same label values, which is fine for the test
-	nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
-	Expect(err).Should(BeNil())
-	for i := range nodes.Items {
-		framework.AddOrUpdateLabelOnNode(f.ClientSet, nodes.Items[i].Name, labelKey, labelValue)
-	}
-}
-
-func deleteNodeLabel(c kubernetes.Interface, labelKey string) {
-	nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
-	Expect(err).Should(BeNil())
-	for i := range nodes.Items {
-		framework.RemoveLabelOffNode(c, nodes.Items[i].Name, labelKey)
-	}
-}
-
-func checkNodeHasLabel(c kubernetes.Interface, labelKey, labelValue string) {
-	nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
-	Expect(err).Should(BeNil())
-	for i := range nodes.Items {
-		framework.ExpectNodeHasLabel(c, nodes.Items[i].Name, labelKey, labelValue)
-	}
-}
-
-func getPVCImageInfoInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) (string, error) {
-	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
-	if err != nil {
-		return "", err
-	}
-
-	stdOut, stdErr := execCommandInToolBoxPod(f,
-		fmt.Sprintf("rbd info %s", imageSpec(pool, imageData.imageName)), rookNamespace)
-	Expect(stdErr).Should(BeEmpty())
-
-	if radosNamespace != "" {
-		e2elog.Logf("found image %s in pool %s namespace %s", imageData.imageName, pool, radosNamespace)
-	} else {
-		e2elog.Logf("found image %s in pool %s", imageData.imageName, pool)
-	}
-
-	return stdOut, nil
-} - -func checkPVCImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error { - _, err := getPVCImageInfoInPool(f, pvc, pool) - - return err -} - -func checkPVCDataPoolForImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool, dataPool string) error { - stdOut, err := getPVCImageInfoInPool(f, pvc, pool) - if err != nil { - return err - } - - if !strings.Contains(stdOut, "data_pool: "+dataPool) { - return fmt.Errorf("missing data pool value in image info, got info (%s)", stdOut) - } - - return nil -} - -func checkPVCImageJournalInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error { - imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f) - if err != nil { - return err - } - - _, stdErr := execCommandInToolBoxPod(f, - fmt.Sprintf("rados listomapkeys %s csi.volume.%s", rbdOptions(pool), imageData.imageID), rookNamespace) - Expect(stdErr).Should(BeEmpty()) - - if radosNamespace != "" { - e2elog.Logf("found image journal %s in pool %s namespace %s", "csi.volume."+imageData.imageID, pool, radosNamespace) - } else { - e2elog.Logf("found image journal %s in pool %s", "csi.volume."+imageData.imageID, pool) - } - - return nil -} - -func checkPVCCSIJournalInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error { - imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f) - if err != nil { - return err - } - - _, stdErr := execCommandInToolBoxPod(f, - fmt.Sprintf("rados getomapval %s csi.volumes.default csi.volume.%s", rbdOptions(pool), imageData.pvName), rookNamespace) - Expect(stdErr).Should(BeEmpty()) - - if radosNamespace != "" { - e2elog.Logf("found CSI journal entry %s in pool %s namespace %s", "csi.volume."+imageData.pvName, pool, radosNamespace) - } else { - e2elog.Logf("found CSI journal entry %s in pool %s", "csi.volume."+imageData.pvName, pool) - } - - return nil -} - -// getBoundPV returns a PV details. 
-func getBoundPV(client kubernetes.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
-	// Get new copy of the claim
-	claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
-	if err != nil {
-		return nil, err
-	}
-
-	// Get the bound PV
-	pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{})
-	return pv, err
-}
-
-func checkPVSelectorValuesForPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim) {
-	pv, err := getBoundPV(f.ClientSet, pvc)
-	if err != nil {
-		Fail(err.Error())
-	}
-
-	if len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) == 0 {
-		Fail("Found empty NodeSelectorTerms in PV")
-	}
-
-	rFound := false
-	zFound := false
-	for _, expression := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions {
-		switch expression.Key {
-		case nodeCSIRegionLabel:
-			if rFound {
-				Fail("Found multiple occurrences of topology key for region")
-			}
-			rFound = true
-			if expression.Values[0] != regionValue {
-				Fail("Topology value for region label mismatch")
-			}
-		case nodeCSIZoneLabel:
-			if zFound {
-				Fail("Found multiple occurrences of topology key for zone")
-			}
-			zFound = true
-			if expression.Values[0] != zoneValue {
-				Fail("Topology value for zone label mismatch")
-			}
-		default:
-			Fail("Unexpected key in node selector terms found in PV")
-		}
-	}
-}
-
 func addTopologyDomainsToDSYaml(template, labels string) string {
 	return strings.ReplaceAll(template, "# - \"--domainlabels=failure-domain/region,failure-domain/zone\"", "- \"--domainlabels="+labels+"\"")
 }
-
-// createCustomConfigMap provides multiple clusters information.
-func createCustomConfigMap(c kubernetes.Interface, pluginPath string, subvolgrpInfo map[string]string) {
-	path := pluginPath + configMap
-	cm := v1.ConfigMap{}
-	err := unmarshal(path, &cm)
-	Expect(err).Should(BeNil())
-
-	// get mon list
-	mons := getMons(rookNamespace, c)
-	// get clusterIDs
-	var clusterID []string
-	for key := range subvolgrpInfo {
-		clusterID = append(clusterID, key)
-	}
-	conmap := []util.ClusterInfo{
-		{
-			ClusterID: clusterID[0],
-			Monitors:  mons,
-		},
-		{
-			ClusterID: clusterID[1],
-			Monitors:  mons,
-		}}
-	for i := 0; i < len(subvolgrpInfo); i++ {
-		conmap[i].CephFS.SubvolumeGroup = subvolgrpInfo[clusterID[i]]
-	}
-	data, err := json.Marshal(conmap)
-	Expect(err).Should(BeNil())
-	cm.Data["config.json"] = string(data)
-	cm.Namespace = cephCSINamespace
-	// since a configmap is already created, update the existing configmap
-	_, updateErr := c.CoreV1().ConfigMaps(cephCSINamespace).Update(context.TODO(), &cm, metav1.UpdateOptions{})
-	Expect(updateErr).Should(BeNil())
-}
-
-// validateSubvolumegroup validates whether subvolumegroup is present.
-func validateSubvolumegroup(f *framework.Framework, subvolgrp string) error {
-	cmd := fmt.Sprintf("ceph fs subvolumegroup getpath myfs %s", subvolgrp)
-	stdOut, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
-	Expect(err).Should(BeEmpty())
-	if err != "" {
-		return fmt.Errorf("error subvolumegroup %s doesn't exist", subvolgrp)
-	}
-	expectedGrpPath := "/volumes/" + subvolgrp
-	stdOut = strings.TrimSpace(stdOut)
-	if stdOut != expectedGrpPath {
-		return fmt.Errorf("error unexpected group path. Found: %s", stdOut)
-	}
-	return nil
-}
-
-func imageSpec(pool, image string) string {
-	if radosNamespace != "" {
-		return pool + "/" + radosNamespace + "/" + image
-	}
-	return pool + "/" + image
-}
-
-func rbdOptions(pool string) string {
-	if radosNamespace != "" {
-		return "--pool=" + pool + " --namespace " + radosNamespace
-	}
-	return "--pool=" + pool
-}
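(For context on the two helpers removed above: per the diffstat they move to e2e/rbd_helper.go. A standalone sketch of the pool/namespace string handling they implement follows; the parameterized radosNamespace and the main function are illustrative, since the in-tree helpers read a package-level radosNamespace variable instead.)

	package main

	import "fmt"

	// imageSpec builds an rbd image spec: pool/namespace/image when a
	// rados namespace is configured, plain pool/image otherwise.
	func imageSpec(radosNamespace, pool, image string) string {
		if radosNamespace != "" {
			return pool + "/" + radosNamespace + "/" + image
		}
		return pool + "/" + image
	}

	// rbdOptions builds the matching rbd CLI flags; the namespace is
	// passed separately via --namespace rather than in the pool spec.
	func rbdOptions(radosNamespace, pool string) string {
		if radosNamespace != "" {
			return "--pool=" + pool + " --namespace " + radosNamespace
		}
		return "--pool=" + pool
	}

	func main() {
		// Sample values for illustration only.
		fmt.Println(imageSpec("", "replicapool", "csi-vol-123"))    // replicapool/csi-vol-123
		fmt.Println(imageSpec("ns1", "replicapool", "csi-vol-123")) // replicapool/ns1/csi-vol-123
		fmt.Println(rbdOptions("ns1", "replicapool"))               // --pool=replicapool --namespace ns1
	}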