Mirror of https://github.com/ceph/ceph-csi.git (synced 2024-11-26 08:10:20 +00:00)
e2e: rework on E2E framework
Rework of the E2E framework for better code organization, and add more helpful logs for debugging.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
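The recurring change throughout this commit is the error-handling pattern: instead of logging with e2elog.Logf and then calling Fail(err.Error()) separately, each failure path now uses a single e2elog.Failf call, which logs the formatted message and fails the test in one step. A minimal illustrative sketch of the pattern (not code from the commit; doSomething and its message are made-up stand-ins):

package e2e

import (
	"errors"

	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// doSomething stands in for any helper that now returns an error to its caller.
func doSomething() error {
	return errors.New("example failure")
}

// exampleStep shows the reworked pattern: one Failf call per failure path.
func exampleStep() {
	if err := doSomething(); err != nil {
		// Before the rework: e2elog.Logf("... %v", err) followed by Fail(err.Error()).
		// After the rework: Failf logs the descriptive message and fails the test.
		e2elog.Failf("failed to do something with error %v", err)
	}
}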
This commit is contained in:
parent 3ea22bc5a8
commit b4693dcffe

e2e/cephfs.go: 401 changes
@@ -34,21 +34,21 @@ func deployCephfsPlugin() {
	data, err := replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerRBAC)
	if err != nil {
-		e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsProvisionerRBAC, err)
+		e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, err)
	}
	_, err = framework.RunKubectlInput(cephCSINamespace, data, "--ignore-not-found=true", ns, "delete", "-f", "-")
	if err != nil {
-		e2elog.Logf("failed to delete provisioner rbac %s %v", cephfsDirPath+cephfsProvisionerRBAC, err)
+		e2elog.Failf("failed to delete provisioner rbac %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, err)
	}

	data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginRBAC)
	if err != nil {
-		e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsNodePluginRBAC, err)
+		e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err)
	}
	_, err = framework.RunKubectlInput(cephCSINamespace, data, "delete", "--ignore-not-found=true", ns, "-f", "-")

	if err != nil {
-		e2elog.Logf("failed to delete nodeplugin rbac %s %v", cephfsDirPath+cephfsNodePluginRBAC, err)
+		e2elog.Failf("failed to delete nodeplugin rbac %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err)
	}

	createORDeleteCephfsResouces("create")
@@ -61,63 +61,73 @@ func deleteCephfsPlugin() {
func createORDeleteCephfsResouces(action string) {
	data, err := replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisioner)
	if err != nil {
-		e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsProvisioner, err)
+		e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisioner, err)
	}
	_, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
	if err != nil {
-		e2elog.Logf("failed to %s cephfs provisioner %v", action, err)
+		e2elog.Failf("failed to %s CephFS provisioner with error %v", action, err)
	}

	data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerRBAC)

	if err != nil {
-		e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsProvisionerRBAC, err)
+		e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, err)
	}
	_, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
	if err != nil {
-		e2elog.Logf("failed to %s cephfs provisioner rbac %v", action, err)
+		e2elog.Failf("failed to %s CephFS provisioner rbac with error %v", action, err)
	}

	data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerPSP)
	if err != nil {
-		e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsProvisionerPSP, err)
+		e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerPSP, err)
	}
	_, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
	if err != nil {
-		e2elog.Logf("failed to %s cephfs provisioner psp %v", action, err)
+		e2elog.Failf("failed to %s CephFS provisioner psp with error %v", action, err)
	}

	data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePlugin)
	if err != nil {
-		e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsNodePlugin, err)
+		e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePlugin, err)
	}
	_, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
	if err != nil {
-		e2elog.Logf("failed to %s cephfs nodeplugin %v", action, err)
+		e2elog.Failf("failed to %s CephFS nodeplugin with error %v", action, err)
	}

	data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginRBAC)
	if err != nil {
-		e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsNodePluginRBAC, err)
+		e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err)
	}
	_, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
	if err != nil {
-		e2elog.Logf("failed to %s cephfs nodeplugin rbac %v", action, err)
+		e2elog.Failf("failed to %s CephFS nodeplugin rbac with error %v", action, err)
	}

	data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginPSP)
	if err != nil {
-		e2elog.Logf("failed to read content from %s %v", cephfsDirPath+cephfsNodePluginPSP, err)
+		e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginPSP, err)
	}
	_, err = framework.RunKubectlInput(cephCSINamespace, data, action, ns, "-f", "-")
	if err != nil {
-		e2elog.Logf("failed to %s cephfs nodeplugin psp %v", action, err)
+		e2elog.Failf("failed to %s CephFS nodeplugin psp with error %v", action, err)
+	}
+}
+
+func validateSubvolumeCount(f *framework.Framework, count int, fileSystemName, subvolumegroup string) {
+	subVol, err := listCephFSSubVolumes(f, fileSystemName, subvolumegroup)
+	if err != nil {
+		e2elog.Failf("failed to list CephFS subvolumes with error %v", err)
+	}
+	if len(subVol) != count {
+		e2elog.Failf("subvolumes [%v]. subvolume count %d not matching expected count %v", subVol, len(subVol), count)
	}
}

var _ = Describe("cephfs", func() {
	f := framework.NewDefaultFramework("cephfs")
	var c clientset.Interface
-	// deploy cephfs CSI
+	// deploy CephFS CSI
	BeforeEach(func() {
		if !testCephFS || upgradeTesting {
			Skip("Skipping CephFS E2E")
@@ -127,13 +137,19 @@ var _ = Describe("cephfs", func() {
			if cephCSINamespace != defaultNs {
				err := createNamespace(c, cephCSINamespace)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to create namespace %s with error %v", cephCSINamespace, err)
				}
			}
			deployCephfsPlugin()
		}
-		createConfigMap(cephfsDirPath, f.ClientSet, f)
-		createCephfsSecret(f.ClientSet, f)
+		err := createConfigMap(cephfsDirPath, f.ClientSet, f)
+		if err != nil {
+			e2elog.Failf("failed to create configmap with error %v", err)
+		}
+		err = createCephfsSecret(f.ClientSet, f)
+		if err != nil {
+			e2elog.Failf("failed to create secret with error %v", err)
+		}
	})

	AfterEach(func() {
@@ -148,22 +164,31 @@ var _ = Describe("cephfs", func() {
			// log node plugin
			logsCSIPods("app=csi-cephfsplugin", c)
		}
-		deleteConfigMap(cephfsDirPath)
-		deleteResource(cephfsExamplePath + "secret.yaml")
-		deleteResource(cephfsExamplePath + "storageclass.yaml")
+		err := deleteConfigMap(cephfsDirPath)
+		if err != nil {
+			e2elog.Failf("failed to delete configmap with error %v", err)
+		}
+		err = deleteResource(cephfsExamplePath + "secret.yaml")
+		if err != nil {
+			e2elog.Failf("failed to delete secret with error %v", err)
+		}
+		err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+		if err != nil {
+			e2elog.Failf("failed to delete storageclass with error %v", err)
+		}
		if deployCephFS {
			deleteCephfsPlugin()
			if cephCSINamespace != defaultNs {
				err := deleteNamespace(c, cephCSINamespace)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to delete namespace %s with error %v", cephCSINamespace, err)
				}
			}
		}
	})

-	Context("Test cephfs CSI", func() {
-		It("Test cephfs CSI", func() {
+	Context("Test CephFS CSI", func() {
+		It("Test CephFS CSI", func() {
			pvcPath := cephfsExamplePath + "pvc.yaml"
			appPath := cephfsExamplePath + "pod.yaml"
			pvcClonePath := cephfsExamplePath + "pvc-restore.yaml"
@@ -172,182 +197,217 @@ var _ = Describe("cephfs", func() {
			appSmartClonePath := cephfsExamplePath + "pod-clone.yaml"
			snapshotPath := cephfsExamplePath + "snapshot.yaml"

-			By("checking provisioner deployment is running")
-			var err error
-			err = waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
+			By("checking provisioner deployment is running", func() {
+				err := waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("timeout waiting for deployment %s with error %v", cephfsDeploymentName, err)
				}
+			})

-			By("checking nodeplugin deamonsets is running")
-			err = waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
+			By("checking nodeplugin deamonset pods are running", func() {
+				err := waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("timeout waiting for daemonset %s with error %v", cephfsDeamonSetName, err)
				}
+			})

			By("check static PVC", func() {
				scPath := cephfsExamplePath + "secret.yaml"
				err := validateCephFsStaticPV(f, appPath, scPath)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to validate CephFS static pv with error %v", err)
				}
			})

-			By("create a storage class with pool and a PVC then Bind it to an app", func() {
-				createCephfsStorageClass(f.ClientSet, f, true, "")
-				validatePVCAndAppBinding(pvcPath, appPath, f)
-				deleteResource(cephfsExamplePath + "storageclass.yaml")
+			By("create a storageclass with pool and a PVC then bind it to an app", func() {
+				err := createCephfsStorageClass(f.ClientSet, f, true, "")
+				if err != nil {
+					e2elog.Failf("failed to create CephFS storageclass with error %v", err)
+				}
+				err = validatePVCAndAppBinding(pvcPath, appPath, f)
+				if err != nil {
+					e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
+				}
+				err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+				if err != nil {
+					e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
+				}
			})

-			createCephfsStorageClass(f.ClientSet, f, false, "")
-
-			By("create and delete a PVC", func() {
-				By("create a PVC and Bind it to an app", func() {
-					validatePVCAndAppBinding(pvcPath, appPath, f)
+			By("create a PVC and bind it to an app", func() {
+				err := createCephfsStorageClass(f.ClientSet, f, false, "")
+				if err != nil {
+					e2elog.Failf("failed to create CephFS storageclass with error %v", err)
+				}
+				err = validatePVCAndAppBinding(pvcPath, appPath, f)
+				if err != nil {
+					e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
+				}
			})

-			By("create a PVC and Bind it to an app with normal user", func() {
-				validateNormalUserPVCAccess(pvcPath, f)
+			By("create a PVC and bind it to an app with normal user", func() {
+				err := validateNormalUserPVCAccess(pvcPath, f)
+				if err != nil {
+					e2elog.Failf("failed to validate normal user CephFS pvc and application binding with error %v", err)
+				}
			})

			By("create/delete multiple PVCs and Apps", func() {
				totalCount := 2
				pvc, err := loadPVC(pvcPath)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to load PVC with error %v", err)
				}
				pvc.Namespace = f.UniqueName

				app, err := loadApp(appPath)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to load application with error %v", err)
				}
				app.Namespace = f.UniqueName
-				// create pvc and app
+				// create PVC and app
				for i := 0; i < totalCount; i++ {
					name := fmt.Sprintf("%s%d", f.UniqueName, i)
-					err := createPVCAndApp(name, f, pvc, app, deployTimeout)
+					err = createPVCAndApp(name, f, pvc, app, deployTimeout)
					if err != nil {
-						Fail(err.Error())
+						e2elog.Failf("failed to create PVC or application with error %v", err)
					}

				}
-				subVol := listCephFSSubVolumes(f, fileSystemName, subvolumegroup)
-				if len(subVol) != totalCount {
-					msg := fmt.Sprintf("subvolumes %v. subvolume count %d not matching expected count %v", subVol, len(subVol), totalCount)
-					e2elog.Logf(msg)
-					Fail(msg)
-				}
-				// delete pvc and app
+				validateSubvolumeCount(f, totalCount, fileSystemName, subvolumegroup)
+				// delete PVC and app
				for i := 0; i < totalCount; i++ {
					name := fmt.Sprintf("%s%d", f.UniqueName, i)
-					err := deletePVCAndApp(name, f, pvc, app)
+					err = deletePVCAndApp(name, f, pvc, app)
					if err != nil {
-						Fail(err.Error())
+						e2elog.Failf("failed to delete PVC or application with error %v", err)
					}

				}
-				subVol = listCephFSSubVolumes(f, fileSystemName, subvolumegroup)
-				if len(subVol) != 0 {
-					msg := fmt.Sprintf("subvolumes %v. subvolume count %d not matching expected count %v", subVol, len(subVol), 0)
-					e2elog.Logf(msg)
-					Fail(msg)
-				}
+				validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
			})

-			By("check data persist after recreating pod with same pvc", func() {
+			By("check data persist after recreating pod", func() {
				err := checkDataPersist(pvcPath, appPath, f)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to check data persist in pvc with error %v", err)
				}
			})

-			By("creating a PVC, deleting backing subvolume, and checking successful PV deletion", func() {
+			By("create PVC, delete backing subvolume and check pv deletion", func() {
				pvc, err := loadPVC(pvcPath)
				if pvc == nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to load PVC with error %v", err)
				}
				pvc.Namespace = f.UniqueName

				err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to create PVC with error %v", err)
				}

				err = deleteBackingCephFSVolume(f, pvc)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to delete CephFS subvolume with error %v", err)
				}

				err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to delete PVC with error %v", err)
				}
			})

			By("validate multiple subvolumegroup creation", func() {
-				deleteResource(cephfsExamplePath + "storageclass.yaml")
+				err := deleteResource(cephfsExamplePath + "storageclass.yaml")
+				if err != nil {
+					e2elog.Failf("failed to delete storageclass with error %v", err)
+				}
				// re-define configmap with information of multiple clusters.
				subvolgrpInfo := map[string]string{
					"clusterID-1": "subvolgrp1",
					"clusterID-2": "subvolgrp2",
				}
-				createCustomConfigMap(f.ClientSet, cephfsDirPath, subvolgrpInfo)
-				createCephfsStorageClass(f.ClientSet, f, false, "clusterID-1")
-				validatePVCAndAppBinding(pvcPath, appPath, f)
-				deleteResource(cephfsExamplePath + "storageclass.yaml")
-				// verify subvolumegroup creation.
-				err := validateSubvolumegroup(f, "subvolgrp1")
+				err = createCustomConfigMap(f.ClientSet, cephfsDirPath, subvolgrpInfo)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to create configmap with error %v", err)
+				}
+				err = createCephfsStorageClass(f.ClientSet, f, false, "clusterID-1")
+				if err != nil {
+					e2elog.Failf("failed to create storageclass with error %v", err)
+				}
+				err = validatePVCAndAppBinding(pvcPath, appPath, f)
+				if err != nil {
+					e2elog.Failf("failed to validate pvc and application with error %v", err)
+				}
+				err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+				if err != nil {
+					e2elog.Failf("failed to delete storageclass with error %v", err)
+				}
+				// verify subvolumegroup creation.
+				err = validateSubvolumegroup(f, "subvolgrp1")
+				if err != nil {
+					e2elog.Failf("failed to validate subvolume group with error %v", err)
				}

				// create resources and verify subvolume group creation
				// for the second cluster.
-				createCephfsStorageClass(f.ClientSet, f, false, "clusterID-2")
-				validatePVCAndAppBinding(pvcPath, appPath, f)
-				deleteResource(cephfsExamplePath + "storageclass.yaml")
+				err = createCephfsStorageClass(f.ClientSet, f, false, "clusterID-2")
+				if err != nil {
+					e2elog.Failf("failed to create storageclass with error %v", err)
+				}
+				err = validatePVCAndAppBinding(pvcPath, appPath, f)
+				if err != nil {
+					e2elog.Failf("failed to validate pvc and application with error %v", err)
+				}
+				err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+				if err != nil {
+					e2elog.Failf("failed to delete storageclass with error %v", err)
+				}
				err = validateSubvolumegroup(f, "subvolgrp2")
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to validate subvolume group with error %v", err)
+				}
+				err = deleteConfigMap(cephfsDirPath)
+				if err != nil {
+					e2elog.Failf("failed to delete configmap with error %v", err)
+				}
+				err = createConfigMap(cephfsDirPath, f.ClientSet, f)
+				if err != nil {
+					e2elog.Failf("failed to create configmap with error %v", err)
+				}
+				err = createCephfsStorageClass(f.ClientSet, f, false, "")
+				if err != nil {
+					e2elog.Failf("failed to create storageclass with error %v", err)
				}
-				deleteConfigMap(cephfsDirPath)
			})

-			createConfigMap(cephfsDirPath, f.ClientSet, f)
-			createCephfsStorageClass(f.ClientSet, f, false, "")

			By("Resize PVC and check application directory size", func() {
				v, err := f.ClientSet.Discovery().ServerVersion()
				if err != nil {
-					e2elog.Logf("failed to get server version with error %v", err)
-					Fail(err.Error())
+					e2elog.Failf("failed to get server version with error with error %v", err)
				}

				// Resize 0.3.0 is only supported from v1.15+
				if v.Major > "1" || (v.Major == "1" && v.Minor >= "15") {
					err := resizePVCAndValidateSize(pvcPath, appPath, f)
					if err != nil {
-						e2elog.Logf("failed to resize PVC %v", err)
-						Fail(err.Error())
+						e2elog.Failf("failed to resize PVC with error %v", err)
					}
				}

			})

			By("Mount pvc as readonly in pod", func() {
-				// create pvc and bind it to an app
+				// create PVC and bind it to an app
				pvc, err := loadPVC(pvcPath)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to load PVC with error %v", err)
				}

				pvc.Namespace = f.UniqueName

				app, err := loadApp(appPath)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to load application with error %v", err)
				}

				app.Namespace = f.UniqueName
@@ -359,7 +419,7 @@ var _ = Describe("cephfs", func() {
				app.Spec.Volumes[0].PersistentVolumeClaim.ReadOnly = true
				err = createPVCAndApp("", f, pvc, app, deployTimeout)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to create PVC or application with error %v", err)
				}

				opt := metav1.ListOptions{
@@ -370,21 +430,20 @@ var _ = Describe("cephfs", func() {
				_, stdErr := execCommandInPodAndAllowFail(f, fmt.Sprintf("echo 'Hello World' > %s", filePath), app.Namespace, &opt)
				readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath)
				if !strings.Contains(stdErr, readOnlyErr) {
-					Fail(stdErr)
+					e2elog.Failf(stdErr)
				}

-				// delete pvc and app
+				// delete PVC and app
				err = deletePVCAndApp("", f, pvc, app)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to delete PVC or application with error %v", err)
				}
			})

			By("create a PVC clone and bind it to an app", func() {
				v, err := f.ClientSet.Discovery().ServerVersion()
				if err != nil {
-					e2elog.Logf("failed to get server version with error %v", err)
-					Fail(err.Error())
+					e2elog.Failf("failed to get server version with error with error %v", err)
				}
				// snapshot beta is only supported from v1.17+
				if v.Major > "1" || (v.Major == "1" && v.Minor >= "17") {
@@ -394,29 +453,31 @@ var _ = Describe("cephfs", func() {
					// always totalCount+parentPVC
					totalSubvolumes := totalCount + 1
					wg.Add(totalCount)
-					createCephFSSnapshotClass(f)
+					err = createCephFSSnapshotClass(f)
+					if err != nil {
+						e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
+					}
					pvc, err := loadPVC(pvcPath)
					if err != nil {
-						Fail(err.Error())
+						e2elog.Failf("failed to load PVC with error %v", err)
					}

					pvc.Namespace = f.UniqueName
-					e2elog.Logf("The PVC template %+v", pvc)
					err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
					if err != nil {
-						Fail(err.Error())
+						e2elog.Failf("failed to create PVC with error %v", err)
					}

					app, err := loadApp(appPath)
					if err != nil {
-						Fail(err.Error())
+						e2elog.Failf("failed to load application with error %v", err)
					}

					app.Namespace = f.UniqueName
					app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name
					wErr := writeDataInPod(app, f)
					if wErr != nil {
-						Fail(wErr.Error())
+						e2elog.Failf("failed to write data with error %v", wErr)
					}

					snap := getSnapshot(snapshotPath)
@@ -428,8 +489,7 @@ var _ = Describe("cephfs", func() {
						s.Name = fmt.Sprintf("%s%d", f.UniqueName, n)
						err = createSnapshot(&s, deployTimeout)
						if err != nil {
-							e2elog.Logf("failed to create snapshot %v", err)
-							Fail(err.Error())
+							e2elog.Failf("failed to create snapshot with error %v", err)
						}
						w.Done()
					}(&wg, i, snap)
@@ -438,11 +498,11 @@ var _ = Describe("cephfs", func() {

					pvcClone, err := loadPVC(pvcClonePath)
					if err != nil {
-						Fail(err.Error())
+						e2elog.Failf("failed to load PVC with error %v", err)
					}
					appClone, err := loadApp(appClonePath)
					if err != nil {
-						Fail(err.Error())
+						e2elog.Failf("failed to load application with error %v", err)
					}
					pvcClone.Namespace = f.UniqueName
					appClone.Namespace = f.UniqueName
@@ -456,20 +516,15 @@ var _ = Describe("cephfs", func() {
						name := fmt.Sprintf("%s%d", f.UniqueName, n)
						err = createPVCAndApp(name, f, &p, &a, deployTimeout)
						if err != nil {
-							e2elog.Logf("failed to create pvc and app %v", err)
-							Fail(err.Error())
+							e2elog.Failf("failed to create PVC and app with error %v", err)
						}
						w.Done()
					}(&wg, i, *pvcClone, *appClone)
				}
				wg.Wait()

-					subVol := listCephFSSubVolumes(f, fileSystemName, subvolumegroup)
-					if len(subVol) != totalSubvolumes {
-						msg := fmt.Sprintf("subvolumes %v. subvolume count %d not matching expected count %v", subVol, len(subVol), totalSubvolumes)
-						e2elog.Logf(msg)
-						Fail(msg)
-					}
+					validateSubvolumeCount(f, totalSubvolumes, fileSystemName, subvolumegroup)
				wg.Add(totalCount)
				// delete clone and app
				for i := 0; i < totalCount; i++ {
@@ -478,8 +533,7 @@ var _ = Describe("cephfs", func() {
						p.Spec.DataSource.Name = name
						err = deletePVCAndApp(name, f, &p, &a)
						if err != nil {
-							e2elog.Logf("failed to delete pvc and app %v", err)
-							Fail(err.Error())
+							e2elog.Failf("failed to delete PVC and app with error %v", err)
						}
						w.Done()
					}(&wg, i, *pvcClone, *appClone)
@@ -487,12 +541,7 @@ var _ = Describe("cephfs", func() {
				wg.Wait()

				parentPVCCount := totalSubvolumes - totalCount
-				subVol = listCephFSSubVolumes(f, fileSystemName, subvolumegroup)
-				if len(subVol) != parentPVCCount {
-					msg := fmt.Sprintf("subvolumes %v. subvolume count %d not matching expected count %v", subVol, len(subVol), parentPVCCount)
-					e2elog.Logf(msg)
-					Fail(msg)
-				}
+				validateSubvolumeCount(f, parentPVCCount, fileSystemName, subvolumegroup)
				// create clones from different snapshosts and bind it to an
				// app
				wg.Add(totalCount)
@@ -502,20 +551,14 @@ var _ = Describe("cephfs", func() {
						p.Spec.DataSource.Name = name
						err = createPVCAndApp(name, f, &p, &a, deployTimeout)
						if err != nil {
-							e2elog.Logf("failed to create pvc and app %v", err)
-							Fail(err.Error())
+							e2elog.Failf("failed to create PVC and app with error %v", err)
						}
						w.Done()
					}(&wg, i, *pvcClone, *appClone)
				}
				wg.Wait()

-				subVol = listCephFSSubVolumes(f, fileSystemName, subvolumegroup)
-				if len(subVol) != totalSubvolumes {
-					msg := fmt.Sprintf("subvolumes %v. subvolume count %d not matching expected count %v", subVol, len(subVol), totalSubvolumes)
-					e2elog.Logf(msg)
-					Fail(msg)
-				}
+				validateSubvolumeCount(f, totalSubvolumes, fileSystemName, subvolumegroup)

				wg.Add(totalCount)
				// delete snapshot
@@ -524,8 +567,7 @@ var _ = Describe("cephfs", func() {
						s.Name = fmt.Sprintf("%s%d", f.UniqueName, n)
						err = deleteSnapshot(&s, deployTimeout)
						if err != nil {
-							e2elog.Logf("failed to delete snapshot %v", err)
-							Fail(err.Error())
+							e2elog.Failf("failed to delete snapshot with error %v", err)
						}
						w.Done()
					}(&wg, i, snap)
@@ -540,40 +582,28 @@ var _ = Describe("cephfs", func() {
						p.Spec.DataSource.Name = name
						err = deletePVCAndApp(name, f, &p, &a)
						if err != nil {
-							e2elog.Logf("failed to delete pvc and app %v", err)
-							Fail(err.Error())
+							e2elog.Failf("failed to delete PVC and app with error %v", err)
						}
						w.Done()
					}(&wg, i, *pvcClone, *appClone)
				}
				wg.Wait()

-				subVol = listCephFSSubVolumes(f, fileSystemName, subvolumegroup)
-				if len(subVol) != parentPVCCount {
-					msg := fmt.Sprintf("subvolumes %v. subvolume count %d not matching expected count %v", subVol, len(subVol), parentPVCCount)
-					e2elog.Logf(msg)
-					Fail(msg)
-				}
+				validateSubvolumeCount(f, parentPVCCount, fileSystemName, subvolumegroup)
				// delete parent pvc
				err = deletePVCAndApp("", f, pvc, app)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to delete PVC or application with error %v", err)
				}

-				subVol = listCephFSSubVolumes(f, fileSystemName, subvolumegroup)
-				if len(subVol) != 0 {
-					msg := fmt.Sprintf("subvolumes %v. subvolume count %d not matching expected count %v", subVol, len(subVol), 0)
-					e2elog.Logf(msg)
-					Fail(msg)
-				}
+				validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
				}
			})

			By("create a PVC-PVC clone and bind it to an app", func() {
				v, err := f.ClientSet.Discovery().ServerVersion()
				if err != nil {
-					e2elog.Logf("failed to get server version with error %v", err)
-					Fail(err.Error())
+					e2elog.Failf("failed to get server version with error with error %v", err)
				}
				// pvc clone is only supported from v1.16+
				if v.Major > "1" || (v.Major == "1" && v.Minor >= "16") {
@@ -584,34 +614,34 @@ var _ = Describe("cephfs", func() {
					totalSubvolumes := totalCount + 1
					pvc, err := loadPVC(pvcPath)
					if err != nil {
-						Fail(err.Error())
+						e2elog.Failf("failed to load PVC with error %v", err)
					}

					pvc.Namespace = f.UniqueName
					err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
					if err != nil {
-						Fail(err.Error())
+						e2elog.Failf("failed to create PVC with error %v", err)
					}
					app, err := loadApp(appPath)
					if err != nil {
-						Fail(err.Error())
+						e2elog.Failf("failed to load application with error %v", err)
					}
					app.Namespace = f.UniqueName
					app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name
					wErr := writeDataInPod(app, f)
					if wErr != nil {
-						Fail(wErr.Error())
+						e2elog.Failf("failed to write data from application %v", wErr)
					}

					pvcClone, err := loadPVC(pvcSmartClonePath)
					if err != nil {
-						Fail(err.Error())
+						e2elog.Failf("failed to load PVC with error %v", err)
					}
					pvcClone.Spec.DataSource.Name = pvc.Name
					pvcClone.Namespace = f.UniqueName
					appClone, err := loadApp(appSmartClonePath)
					if err != nil {
-						Fail(err.Error())
+						e2elog.Failf("failed to load application with error %v", err)
					}
					appClone.Namespace = f.UniqueName
					wg.Add(totalCount)
@@ -621,23 +651,19 @@ var _ = Describe("cephfs", func() {
						name := fmt.Sprintf("%s%d", f.UniqueName, n)
						err = createPVCAndApp(name, f, &p, &a, deployTimeout)
						if err != nil {
-							Fail(err.Error())
+							e2elog.Failf("failed to create PVC or application with error %v", err)
						}
						w.Done()
					}(&wg, i, *pvcClone, *appClone)
				}
				wg.Wait()

-				subVol := listCephFSSubVolumes(f, fileSystemName, subvolumegroup)
-				if len(subVol) != totalSubvolumes {
-					msg := fmt.Sprintf("subvolumes %v. subvolume count %d not matching expected count %v", subVol, len(subVol), totalSubvolumes)
-					e2elog.Logf(msg)
-					Fail(msg)
-				}
+				validateSubvolumeCount(f, totalSubvolumes, fileSystemName, subvolumegroup)
				// delete parent pvc
				err = deletePVCAndApp("", f, pvc, app)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to delete PVC or application with error %v", err)
				}

				wg.Add(totalCount)
@@ -648,34 +674,29 @@ var _ = Describe("cephfs", func() {
						p.Spec.DataSource.Name = name
						err = deletePVCAndApp(name, f, &p, &a)
						if err != nil {
-							Fail(err.Error())
+							e2elog.Failf("failed to delete PVC or application with error %v", err)
						}
						w.Done()
					}(&wg, i, *pvcClone, *appClone)
				}
				wg.Wait()

-				subVol = listCephFSSubVolumes(f, fileSystemName, subvolumegroup)
-				if len(subVol) != 0 {
-					msg := fmt.Sprintf("subvolumes %v. subvolume count %d not matching expected count %v", subVol, len(subVol), 0)
-					e2elog.Logf(msg)
-					Fail(msg)
-				}
+				validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
				}
			})

-			By("Create ROX PVC and Bind it to an app", func() {
-				// create pvc and bind it to an app
+			By("Create ROX PVC and bind it to an app", func() {
+				// create PVC and bind it to an app
				pvc, err := loadPVC(pvcPath)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to load PVC with error %v", err)
				}

				pvc.Namespace = f.UniqueName
				pvc.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadOnlyMany}
				app, err := loadApp(appPath)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to load application with error %v", err)
				}

				app.Namespace = f.UniqueName
@@ -686,7 +707,7 @@ var _ = Describe("cephfs", func() {
				app.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvc.Name
				err = createPVCAndApp("", f, pvc, app, deployTimeout)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to create PVC or application with error %v", err)
				}

				opt := metav1.ListOptions{
@@ -697,27 +718,25 @@ var _ = Describe("cephfs", func() {
				_, stdErr := execCommandInPodAndAllowFail(f, fmt.Sprintf("echo 'Hello World' > %s", filePath), app.Namespace, &opt)
				readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath)
				if !strings.Contains(stdErr, readOnlyErr) {
-					Fail(stdErr)
+					e2elog.Failf(stdErr)
				}

-				// delete pvc and app
+				// delete PVC and app
				err = deletePVCAndApp("", f, pvc, app)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to delete PVC or application with error %v", err)
				}
			})
			// Make sure this should be last testcase in this file, because
			// it deletes pool
-			By("Create a PVC and Delete PVC when backend pool deleted", func() {
+			By("Create a PVC and delete PVC when backend pool deleted", func() {
				err := pvcDeleteWhenPoolNotFound(pvcPath, true, f)
				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to delete PVC with error %v", err)
				}
			})

			})

-			})
		})

	})
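Two structural changes run through the cephfs.go diff above: each verification step is now wrapped in a By(..., func() {...}) callback so the failure is reported against the step that produced it, and the repeated inline subvolume-count checks collapse into the new validateSubvolumeCount helper. A condensed, self-contained sketch of that shape follows; the Describe/It/By structure and e2elog.Failf call mirror the commit, while checkDeployment and its error are made-up stand-ins for helpers such as waitForDeploymentComplete:

package e2e

import (
	"errors"

	. "github.com/onsi/ginkgo"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// checkDeployment is a stand-in for a real wait helper that returns an error.
func checkDeployment() error {
	return errors.New("example failure")
}

var _ = Describe("example", func() {
	It("runs each check inside a named By block", func() {
		By("checking provisioner deployment is running", func() {
			if err := checkDeployment(); err != nil {
				// Failing inside the By callback ties the message to the named step.
				e2elog.Failf("timeout waiting for deployment with error %v", err)
			}
		})
	})
})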
e2e/cephfs_helper.go (new file, 131 lines)
@@ -0,0 +1,131 @@
package e2e

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

const (
	adminUser = "admin"
)

// validateSubvolumegroup validates whether subvolumegroup is present.
func validateSubvolumegroup(f *framework.Framework, subvolgrp string) error {
	cmd := fmt.Sprintf("ceph fs subvolumegroup getpath myfs %s", subvolgrp)
	stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("failed to getpath for subvolumegroup %s with error %v", subvolgrp, stdErr)
	}
	expectedGrpPath := "/volumes/" + subvolgrp
	stdOut = strings.TrimSpace(stdOut)
	if stdOut != expectedGrpPath {
		return fmt.Errorf("error unexpected group path. Found: %s", stdOut)
	}
	return nil
}

func createCephfsStorageClass(c kubernetes.Interface, f *framework.Framework, enablePool bool, clusterID string) error {
	scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "storageclass.yaml")
	sc, err := getStorageClass(scPath)
	if err != nil {
		return err
	}
	sc.Parameters["fsName"] = "myfs"
	sc.Parameters["csi.storage.k8s.io/provisioner-secret-namespace"] = rookNamespace
	sc.Parameters["csi.storage.k8s.io/provisioner-secret-name"] = cephfsProvisionerSecretName

	sc.Parameters["csi.storage.k8s.io/controller-expand-secret-namespace"] = rookNamespace
	sc.Parameters["csi.storage.k8s.io/controller-expand-secret-name"] = cephfsProvisionerSecretName

	sc.Parameters["csi.storage.k8s.io/node-stage-secret-namespace"] = rookNamespace
	sc.Parameters["csi.storage.k8s.io/node-stage-secret-name"] = cephfsNodePluginSecretName

	if enablePool {
		sc.Parameters["pool"] = "myfs-data0"
	}
	fsID, stdErr, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error getting fsid %v", stdErr)
	}
	// remove new line present in fsID
	fsID = strings.Trim(fsID, "\n")
	if clusterID != "" {
		fsID = clusterID
	}
	sc.Namespace = cephCSINamespace
	sc.Parameters["clusterID"] = fsID
	_, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
	return err
}

func createCephfsSecret(c kubernetes.Interface, f *framework.Framework) error {
	scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "secret.yaml")
	sc, err := getSecret(scPath)
	if err != nil {
		return err
	}
	adminKey, stdErr, err := execCommandInToolBoxPod(f, "ceph auth get-key client.admin", rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error getting admin key %v", stdErr)
	}
	sc.StringData["adminID"] = adminUser
	sc.StringData["adminKey"] = adminKey
	delete(sc.StringData, "userID")
	delete(sc.StringData, "userKey")
	sc.Namespace = cephCSINamespace
	_, err = c.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &sc, metav1.CreateOptions{})
	return err
}

func deleteBackingCephFSVolume(f *framework.Framework, pvc *v1.PersistentVolumeClaim) error {
	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
	if err != nil {
		return err
	}

	_, stdErr, err := execCommandInToolBoxPod(f, "ceph fs subvolume rm myfs "+imageData.imageName+" "+subvolumegroup, rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error deleting backing volume %s %v", imageData.imageName, stdErr)
	}
	return nil
}

type cephfsSubVolume struct {
	Name string `json:"name"`
}

func listCephFSSubVolumes(f *framework.Framework, filesystem, groupname string) ([]cephfsSubVolume, error) {
	var subVols []cephfsSubVolume
	stdout, stdErr, err := execCommandInToolBoxPod(f, fmt.Sprintf("ceph fs subvolume ls %s --group_name=%s --format=json", filesystem, groupname), rookNamespace)
	if err != nil {
		return subVols, err
	}
	if stdErr != "" {
		return subVols, fmt.Errorf("error listing subolumes %v", stdErr)
	}

	err = json.Unmarshal([]byte(stdout), &subVols)
	if err != nil {
		return subVols, err
	}
	return subVols, nil
}
e2e/configmap.go (new file, 120 lines)
@@ -0,0 +1,120 @@
package e2e

import (
	"context"
	"encoding/json"
	"fmt"
	"strings"

	"github.com/ceph/ceph-csi/internal/util"

	v1 "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

func deleteConfigMap(pluginPath string) error {
	path := pluginPath + configMap
	_, err := framework.RunKubectl(cephCSINamespace, "delete", "-f", path, ns)
	if err != nil {
		return err
	}
	return nil
}

func createConfigMap(pluginPath string, c kubernetes.Interface, f *framework.Framework) error {
	path := pluginPath + configMap
	cm := v1.ConfigMap{}
	err := unmarshal(path, &cm)
	if err != nil {
		return err
	}

	fsID, stdErr, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error getting fsid %v", stdErr)
	}
	// remove new line present in fsID
	fsID = strings.Trim(fsID, "\n")
	// get mon list
	mons, err := getMons(rookNamespace, c)
	if err != nil {
		return err
	}
	conmap := []util.ClusterInfo{{
		ClusterID:      fsID,
		Monitors:       mons,
		RadosNamespace: radosNamespace,
	}}
	if upgradeTesting {
		subvolumegroup = "csi"
	}
	conmap[0].CephFS.SubvolumeGroup = subvolumegroup
	data, err := json.Marshal(conmap)
	if err != nil {
		return err
	}
	cm.Data["config.json"] = string(data)
	cm.Namespace = cephCSINamespace
	// if the configmap is present update it,during cephcsi helm charts
	// deployment empty configmap gets created we need to override it
	_, err = c.CoreV1().ConfigMaps(cephCSINamespace).Get(context.TODO(), cm.Name, metav1.GetOptions{})

	if err == nil {
		_, updateErr := c.CoreV1().ConfigMaps(cephCSINamespace).Update(context.TODO(), &cm, metav1.UpdateOptions{})
		if updateErr != nil {
			return updateErr
		}
	}
	if apierrs.IsNotFound(err) {
		_, err = c.CoreV1().ConfigMaps(cephCSINamespace).Create(context.TODO(), &cm, metav1.CreateOptions{})
	}

	return err
}

// createCustomConfigMap provides multiple clusters information.
func createCustomConfigMap(c kubernetes.Interface, pluginPath string, subvolgrpInfo map[string]string) error {
	path := pluginPath + configMap
	cm := v1.ConfigMap{}
	err := unmarshal(path, &cm)
	if err != nil {
		return err
	}
	// get mon list
	mons, err := getMons(rookNamespace, c)
	if err != nil {
		return err
	}
	// get clusterIDs
	var clusterID []string
	for key := range subvolgrpInfo {
		clusterID = append(clusterID, key)
	}
	conmap := []util.ClusterInfo{
		{
			ClusterID: clusterID[0],
			Monitors:  mons,
		},
		{
			ClusterID: clusterID[1],
			Monitors:  mons,
		}}
	for i := 0; i < len(subvolgrpInfo); i++ {
		conmap[i].CephFS.SubvolumeGroup = subvolgrpInfo[clusterID[i]]
	}
	data, err := json.Marshal(conmap)
	if err != nil {
		return err
	}
	cm.Data["config.json"] = string(data)
	cm.Namespace = cephCSINamespace
	// since a configmap is already created, update the existing configmap
	_, err = c.CoreV1().ConfigMaps(cephCSINamespace).Update(context.TODO(), &cm, metav1.UpdateOptions{})
	return err
}
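createConfigMap above builds the ceph-csi cluster configuration (cluster ID, monitor list, CephFS subvolume group) as a JSON document and stores it under config.json in the ConfigMap. The sketch below reproduces that marshalling step with a locally defined struct; it is a simplified stand-in for util.ClusterInfo from github.com/ceph/ceph-csi/internal/util, and the JSON keys, fsid, and monitor address are illustrative assumptions rather than the library's exact definitions:

package main

import (
	"encoding/json"
	"fmt"
)

// clusterInfo is a simplified stand-in for util.ClusterInfo; field names and
// JSON tags here are assumptions for the example only.
type clusterInfo struct {
	ClusterID string   `json:"clusterID"`
	Monitors  []string `json:"monitors"`
	CephFS    struct {
		SubvolumeGroup string `json:"subvolumeGroup"`
	} `json:"cephFS"`
}

func main() {
	// One entry keyed by the cluster fsid, as createConfigMap does.
	conmap := []clusterInfo{{
		ClusterID: "example-fsid",            // placeholder fsid
		Monitors:  []string{"10.0.0.1:6789"}, // placeholder monitor endpoint
	}}
	conmap[0].CephFS.SubvolumeGroup = "e2e"
	data, err := json.Marshal(conmap)
	if err != nil {
		panic(err)
	}
	// In the real helper this string becomes cm.Data["config.json"].
	fmt.Println(string(data))
}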
75  e2e/namespace.go  Normal file
@@ -0,0 +1,75 @@
package e2e

import (
	"context"
	"fmt"
	"io/ioutil"
	"strings"
	"time"

	v1 "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	testutils "k8s.io/kubernetes/test/utils"
)

func createNamespace(c kubernetes.Interface, name string) error {
	timeout := time.Duration(deployTimeout) * time.Minute
	ns := &v1.Namespace{
		ObjectMeta: metav1.ObjectMeta{
			Name: name,
		},
	}
	_, err := c.CoreV1().Namespaces().Create(context.TODO(), ns, metav1.CreateOptions{})
	if err != nil && !apierrs.IsAlreadyExists(err) {
		return err
	}

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err := c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			e2elog.Logf("Error getting namespace: '%s': %v", name, err)
			if apierrs.IsNotFound(err) {
				return false, nil
			}
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		return true, nil
	})
}

func deleteNamespace(c kubernetes.Interface, name string) error {
	timeout := time.Duration(deployTimeout) * time.Minute
	err := c.CoreV1().Namespaces().Delete(context.TODO(), name, metav1.DeleteOptions{})
	if err != nil && !apierrs.IsNotFound(err) {
		return err
	}
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err = c.CoreV1().Namespaces().Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			if apierrs.IsNotFound(err) {
				return true, nil
			}
			e2elog.Logf("Error getting namespace: '%s': %v", name, err)
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		return false, nil
	})
}

func replaceNamespaceInTemplate(filePath string) (string, error) {
	read, err := ioutil.ReadFile(filePath)
	if err != nil {
		return "", err
	}
	return strings.ReplaceAll(string(read), "namespace: default", fmt.Sprintf("namespace: %s", cephCSINamespace)), nil
}
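A minimal sketch of how a suite pairs these two helpers, following the pattern the upgrade tests later in this commit use (create the namespace in BeforeEach, remove it in AfterEach):

	// Create the test namespace up front, tear it down afterwards,
	// failing loudly if either step errors.
	if cephCSINamespace != defaultNs {
		if err := createNamespace(f.ClientSet, cephCSINamespace); err != nil {
			e2elog.Failf("failed to create namespace with error %v", err)
		}
	}
	// ... run the tests ...
	if cephCSINamespace != defaultNs {
		if err := deleteNamespace(f.ClientSet, cephCSINamespace); err != nil {
			e2elog.Failf("failed to delete namespace with error %v", err)
		}
	}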
44  e2e/node.go  Normal file
@@ -0,0 +1,44 @@
package e2e

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
)

func createNodeLabel(f *framework.Framework, labelKey, labelValue string) error {
	// NOTE: This makes all nodes (in a multi-node setup) in the test take
	// the same label values, which is fine for the test
	nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for i := range nodes.Items {
		framework.AddOrUpdateLabelOnNode(f.ClientSet, nodes.Items[i].Name, labelKey, labelValue)
	}
	return nil
}

func deleteNodeLabel(c kubernetes.Interface, labelKey string) error {
	nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for i := range nodes.Items {
		framework.RemoveLabelOffNode(c, nodes.Items[i].Name, labelKey)
	}
	return nil
}

func checkNodeHasLabel(c kubernetes.Interface, labelKey, labelValue string) error {
	nodes, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return err
	}
	for i := range nodes.Items {
		framework.ExpectNodeHasLabel(c, nodes.Items[i].Name, labelKey, labelValue)
	}
	return nil
}
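A short sketch of the topology-label pattern these helpers support; the label and value identifiers are the ones the RBD tests in this commit already use:

	// Label every node with the test region/zone before topology tests,
	// then remove the labels afterwards.
	if err := createNodeLabel(f, nodeRegionLabel, regionValue); err != nil {
		e2elog.Failf("failed to create node label with error %v", err)
	}
	if err := createNodeLabel(f, nodeZoneLabel, zoneValue); err != nil {
		e2elog.Failf("failed to create node label with error %v", err)
	}
	// ... topology-aware provisioning tests ...
	if err := deleteNodeLabel(f.ClientSet, nodeRegionLabel); err != nil {
		e2elog.Failf("failed to delete node label with error %v", err)
	}
	if err := deleteNodeLabel(f.ClientSet, nodeZoneLabel); err != nil {
		e2elog.Failf("failed to delete node label with error %v", err)
	}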
212  e2e/pod.go  Normal file
@@ -0,0 +1,212 @@
package e2e

import (
	"context"
	"errors"
	"fmt"
	"strings"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/client/conditions"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	testutils "k8s.io/kubernetes/test/utils"
)

func waitForDaemonSets(name, ns string, c kubernetes.Interface, t int) error {
	timeout := time.Duration(t) * time.Minute
	start := time.Now()
	e2elog.Logf("Waiting up to %v for all daemonsets in namespace '%s' to start", timeout, ns)

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		ds, err := c.AppsV1().DaemonSets(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			e2elog.Logf("Error getting daemonsets in namespace: '%s': %v", ns, err)
			if strings.Contains(err.Error(), "not found") {
				return false, nil
			}
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			return false, err
		}
		dNum := ds.Status.DesiredNumberScheduled
		ready := ds.Status.NumberReady
		e2elog.Logf("%d / %d pods ready in namespace '%s' in daemonset '%s' (%d seconds elapsed)", ready, dNum, ns, ds.ObjectMeta.Name, int(time.Since(start).Seconds()))
		if ready != dNum {
			return false, nil
		}

		return true, nil
	})
}

// Waits for the deployment to complete.
func waitForDeploymentComplete(name, ns string, c kubernetes.Interface, t int) error {
	var (
		deployment *appsv1.Deployment
		reason     string
		err        error
	)
	timeout := time.Duration(t) * time.Minute
	err = wait.PollImmediate(poll, timeout, func() (bool, error) {
		deployment, err = c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}

		// TODO need to check rolling update

		// When the deployment status and its underlying resources reach the
		// desired state, we're done
		if deployment.Status.Replicas == deployment.Status.ReadyReplicas {
			return true, nil
		}
		e2elog.Logf("deployment status: expected replica count %d running replica count %d", deployment.Status.Replicas, deployment.Status.ReadyReplicas)
		reason = fmt.Sprintf("deployment status: %#v", deployment.Status.String())
		return false, nil
	})

	if errors.Is(err, wait.ErrWaitTimeout) {
		err = fmt.Errorf("%s", reason)
	}
	if err != nil {
		return fmt.Errorf("error waiting for deployment %q status to match expectation: %w", name, err)
	}
	return nil
}

func getCommandInPodOpts(f *framework.Framework, c, ns string, opt *metav1.ListOptions) (framework.ExecOptions, error) {
	cmd := []string{"/bin/sh", "-c", c}
	podList, err := f.PodClientNS(ns).List(context.TODO(), *opt)
	framework.ExpectNoError(err)
	if len(podList.Items) == 0 {
		return framework.ExecOptions{}, errors.New("podlist is empty")
	}
	if err != nil {
		return framework.ExecOptions{}, err
	}
	return framework.ExecOptions{
		Command:            cmd,
		PodName:            podList.Items[0].Name,
		Namespace:          ns,
		ContainerName:      podList.Items[0].Spec.Containers[0].Name,
		Stdin:              nil,
		CaptureStdout:      true,
		CaptureStderr:      true,
		PreserveWhitespace: true,
	}, nil
}

func execCommandInPod(f *framework.Framework, c, ns string, opt *metav1.ListOptions) (string, string, error) {
	podPot, err := getCommandInPodOpts(f, c, ns, opt)
	if err != nil {
		return "", "", err
	}
	stdOut, stdErr, err := f.ExecWithOptions(podPot)
	if stdErr != "" {
		e2elog.Logf("stdErr occurred: %v", stdErr)
	}
	return stdOut, stdErr, err
}

func execCommandInToolBoxPod(f *framework.Framework, c, ns string) (string, string, error) {
	opt := &metav1.ListOptions{
		LabelSelector: rookTolBoxPodLabel,
	}
	podPot, err := getCommandInPodOpts(f, c, ns, opt)
	if err != nil {
		return "", "", err
	}
	stdOut, stdErr, err := f.ExecWithOptions(podPot)
	if stdErr != "" {
		e2elog.Logf("stdErr occurred: %v", stdErr)
	}
	return stdOut, stdErr, err
}

func execCommandInPodAndAllowFail(f *framework.Framework, c, ns string, opt *metav1.ListOptions) (string, string) {
	podPot, err := getCommandInPodOpts(f, c, ns, opt)
	if err != nil {
		return "", err.Error()
	}
	stdOut, stdErr, err := f.ExecWithOptions(podPot)
	if err != nil {
		e2elog.Logf("command %s failed: %v", c, err)
	}
	return stdOut, stdErr
}

func loadApp(path string) (*v1.Pod, error) {
	app := v1.Pod{}
	err := unmarshal(path, &app)
	if err != nil {
		return nil, err
	}
	return &app, nil
}

func createApp(c kubernetes.Interface, app *v1.Pod, timeout int) error {
	_, err := c.CoreV1().Pods(app.Namespace).Create(context.TODO(), app, metav1.CreateOptions{})
	if err != nil {
		return err
	}
	return waitForPodInRunningState(app.Name, app.Namespace, c, timeout)
}

func waitForPodInRunningState(name, ns string, c kubernetes.Interface, t int) error {
	timeout := time.Duration(t) * time.Minute
	start := time.Now()
	e2elog.Logf("Waiting up to %v to be in Running state", name)
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		pod, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		switch pod.Status.Phase {
		case v1.PodRunning:
			return true, nil
		case v1.PodFailed, v1.PodSucceeded:
			return false, conditions.ErrPodCompleted
		}
		e2elog.Logf("%s app is in %s phase expected to be in Running state (%d seconds elapsed)", name, pod.Status.Phase, int(time.Since(start).Seconds()))
		return false, nil
	})
}

func deletePod(name, ns string, c kubernetes.Interface, t int) error {
	timeout := time.Duration(t) * time.Minute
	err := c.CoreV1().Pods(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
	if err != nil {
		return err
	}
	start := time.Now()
	e2elog.Logf("Waiting for pod %v to be deleted", name)
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		_, err := c.CoreV1().Pods(ns).Get(context.TODO(), name, metav1.GetOptions{})

		if apierrs.IsNotFound(err) {
			return true, nil
		}
		e2elog.Logf("%s app to be deleted (%d seconds elapsed)", name, int(time.Since(start).Seconds()))
		if err != nil {
			return false, err
		}
		return false, nil
	})
}

func deletePodWithLabel(label, ns string, skipNotFound bool) error {
	_, err := framework.RunKubectl(cephCSINamespace, "delete", "po", "-l", label, fmt.Sprintf("--ignore-not-found=%t", skipNotFound), fmt.Sprintf("--namespace=%s", ns))
	if err != nil {
		e2elog.Logf("failed to delete pod %v", err)
	}
	return err
}
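These exec helpers return stdout, stderr and an error; callers in this commit check both failure channels. A representative sketch, mirroring how the storage-class helpers below consume execCommandInToolBoxPod:

	// Run a ceph CLI command inside the Rook toolbox pod and check both
	// the exec error and anything printed to stderr.
	fsID, stdErr, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
	if err != nil {
		e2elog.Failf("failed to execute command with error %v", err)
	}
	if stdErr != "" {
		e2elog.Failf("failed to get fsid from ceph cluster %s", stdErr)
	}
	fsID = strings.Trim(fsID, "\n")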
171  e2e/pvc.go  Normal file
@@ -0,0 +1,171 @@
package e2e

import (
	"context"
	"errors"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	apierrs "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
	testutils "k8s.io/kubernetes/test/utils"
)

func loadPVC(path string) (*v1.PersistentVolumeClaim, error) {
	pvc := &v1.PersistentVolumeClaim{}
	err := unmarshal(path, &pvc)
	if err != nil {
		return nil, err
	}
	return pvc, err
}

func createPVCAndvalidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, t int) error {
	timeout := time.Duration(t) * time.Minute
	pv := &v1.PersistentVolume{}
	var err error
	_, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(context.TODO(), pvc, metav1.CreateOptions{})
	if err != nil {
		return err
	}
	if timeout == 0 {
		return nil
	}
	name := pvc.Name
	start := time.Now()
	e2elog.Logf("Waiting up to %v to be in Bound state", pvc)

	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		e2elog.Logf("waiting for PVC %s (%d seconds elapsed)", pvc.Name, int(time.Since(start).Seconds()))
		pvc, err = c.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			e2elog.Logf("Error getting pvc in namespace: '%s': %v", pvc.Namespace, err)
			if testutils.IsRetryableAPIError(err) {
				return false, nil
			}
			if apierrs.IsNotFound(err) {
				return false, nil
			}
			return false, err
		}

		if pvc.Spec.VolumeName == "" {
			return false, nil
		}

		pv, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
		if err != nil {
			return false, err
		}
		if apierrs.IsNotFound(err) {
			return false, nil
		}
		err = e2epv.WaitOnPVandPVC(c, pvc.Namespace, pv, pvc)
		if err != nil {
			return false, nil
		}
		return true, nil
	})
}

func deletePVCAndValidatePV(c kubernetes.Interface, pvc *v1.PersistentVolumeClaim, t int) error {
	timeout := time.Duration(t) * time.Minute
	nameSpace := pvc.Namespace
	name := pvc.Name
	var err error
	e2elog.Logf("Deleting PersistentVolumeClaim %v on namespace %v", name, nameSpace)

	pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(context.TODO(), name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	pv, err := c.CoreV1().PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
	if err != nil {
		return err
	}

	err = c.CoreV1().PersistentVolumeClaims(nameSpace).Delete(context.TODO(), name, metav1.DeleteOptions{})
	if err != nil {
		return fmt.Errorf("delete of PVC %v failed: %w", name, err)
	}
	start := time.Now()
	return wait.PollImmediate(poll, timeout, func() (bool, error) {
		// Check that the PVC is really deleted.
		e2elog.Logf("waiting for PVC %s in state %s to be deleted (%d seconds elapsed)", name, pvc.Status.String(), int(time.Since(start).Seconds()))
		pvc, err = c.CoreV1().PersistentVolumeClaims(nameSpace).Get(context.TODO(), name, metav1.GetOptions{})
		if err == nil {
			return false, nil
		}
		if !apierrs.IsNotFound(err) {
			return false, fmt.Errorf("get on deleted PVC %v failed with error other than \"not found\": %w", name, err)
		}

		// Examine the pv.ClaimRef and UID. Expect nil values.
		_, err = c.CoreV1().PersistentVolumes().Get(context.TODO(), pv.Name, metav1.GetOptions{})
		if err == nil {
			return false, nil
		}

		if !apierrs.IsNotFound(err) {
			return false, fmt.Errorf("delete PV %v failed with error other than \"not found\": %w", pv.Name, err)
		}

		return true, nil
	})
}

// getBoundPV returns a PV details.
func getBoundPV(client kubernetes.Interface, pvc *v1.PersistentVolumeClaim) (*v1.PersistentVolume, error) {
	// Get new copy of the claim
	claim, err := client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}

	// Get the bound PV
	pv, err := client.CoreV1().PersistentVolumes().Get(context.TODO(), claim.Spec.VolumeName, metav1.GetOptions{})
	return pv, err
}

func checkPVSelectorValuesForPVC(f *framework.Framework, pvc *v1.PersistentVolumeClaim) error {
	pv, err := getBoundPV(f.ClientSet, pvc)
	if err != nil {
		return err
	}

	if len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) == 0 {
		return errors.New("found empty NodeSelectorTerms in PV")
	}

	rFound := false
	zFound := false
	for _, expression := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions {
		switch expression.Key {
		case nodeCSIRegionLabel:
			if rFound {
				return errors.New("found multiple occurrences of topology key for region")
			}
			rFound = true
			if expression.Values[0] != regionValue {
				return errors.New("topology value for region label mismatch")
			}
		case nodeCSIZoneLabel:
			if zFound {
				return errors.New("found multiple occurrences of topology key for zone")
			}
			zFound = true
			if expression.Values[0] != zoneValue {
				return errors.New("topology value for zone label mismatch")
			}
		default:
			return errors.New("unexpected key in node selector terms found in PV")
		}
	}
	return nil
}
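A typical call sequence for these PVC helpers, sketched here with the suite-level pvcPath and deployTimeout values used elsewhere in these tests:

	// Load a claim manifest, create it and wait for it to bind, then clean up.
	pvc, err := loadPVC(pvcPath)
	if err != nil {
		e2elog.Failf("failed to load pvc with error %v", err)
	}
	pvc.Namespace = f.UniqueName
	err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
	if err != nil {
		e2elog.Failf("failed to create pvc with error %v", err)
	}
	err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
	if err != nil {
		e2elog.Failf("failed to delete pvc with error %v", err)
	}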
832  e2e/rbd.go
File diff suppressed because it is too large.
390  e2e/rbd_helper.go  Normal file
@@ -0,0 +1,390 @@
package e2e

import (
	"context"
	"encoding/json"
	"fmt"
	"regexp"
	"strings"

	v1 "k8s.io/api/core/v1"
	scv1 "k8s.io/api/storage/v1"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

func imageSpec(pool, image string) string {
	if radosNamespace != "" {
		return pool + "/" + radosNamespace + "/" + image
	}
	return pool + "/" + image
}

func rbdOptions(pool string) string {
	if radosNamespace != "" {
		return "--pool=" + pool + " --namespace " + radosNamespace
	}
	return "--pool=" + pool
}

func createRBDStorageClass(c kubernetes.Interface, f *framework.Framework, scOptions, parameters map[string]string) error {
	scPath := fmt.Sprintf("%s/%s", rbdExamplePath, "storageclass.yaml")
	sc, err := getStorageClass(scPath)
	if err != nil {
		return nil
	}
	sc.Parameters["pool"] = defaultRBDPool
	sc.Parameters["csi.storage.k8s.io/provisioner-secret-namespace"] = rookNamespace
	sc.Parameters["csi.storage.k8s.io/provisioner-secret-name"] = rbdProvisionerSecretName

	sc.Parameters["csi.storage.k8s.io/controller-expand-secret-namespace"] = rookNamespace
	sc.Parameters["csi.storage.k8s.io/controller-expand-secret-name"] = rbdProvisionerSecretName

	sc.Parameters["csi.storage.k8s.io/node-stage-secret-namespace"] = rookNamespace
	sc.Parameters["csi.storage.k8s.io/node-stage-secret-name"] = rbdNodePluginSecretName

	fsID, stdErr, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error getting fsid %v", stdErr)
	}
	// remove new line present in fsID
	fsID = strings.Trim(fsID, "\n")

	sc.Parameters["clusterID"] = fsID
	for k, v := range parameters {
		sc.Parameters[k] = v
	}
	sc.Namespace = cephCSINamespace

	if scOptions["volumeBindingMode"] == "WaitForFirstConsumer" {
		value := scv1.VolumeBindingWaitForFirstConsumer
		sc.VolumeBindingMode = &value
	}

	// comma separated mount options
	if opt, ok := scOptions[rbdmountOptions]; ok {
		mOpt := strings.Split(opt, ",")
		sc.MountOptions = append(sc.MountOptions, mOpt...)
	}
	_, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
	return err
}

func createRadosNamespace(f *framework.Framework) error {
	stdOut, stdErr, err := execCommandInToolBoxPod(f,
		fmt.Sprintf("rbd namespace ls --pool=%s", defaultRBDPool), rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error listing rbd namespace %v", stdErr)
	}
	if !strings.Contains(stdOut, radosNamespace) {
		_, stdErr, err = execCommandInToolBoxPod(f,
			fmt.Sprintf("rbd namespace create %s", rbdOptions(defaultRBDPool)), rookNamespace)
		if err != nil {
			return err
		}
		if stdErr != "" {
			return fmt.Errorf("error creating rbd namespace %v", stdErr)
		}
	}
	stdOut, stdErr, err = execCommandInToolBoxPod(f,
		fmt.Sprintf("rbd namespace ls --pool=%s", rbdTopologyPool), rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error listing rbd namespace %v", stdErr)
	}

	if !strings.Contains(stdOut, radosNamespace) {
		_, stdErr, err = execCommandInToolBoxPod(f,
			fmt.Sprintf("rbd namespace create %s", rbdOptions(rbdTopologyPool)), rookNamespace)
		if err != nil {
			return err
		}
		if stdErr != "" {
			return fmt.Errorf("error creating rbd namespace %v", stdErr)
		}
	}
	return nil
}

func createRBDSecret(c kubernetes.Interface, f *framework.Framework) error {
	scPath := fmt.Sprintf("%s/%s", rbdExamplePath, "secret.yaml")
	sc, err := getSecret(scPath)
	if err != nil {
		return err
	}
	adminKey, stdErr, err := execCommandInToolBoxPod(f, "ceph auth get-key client.admin", rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error getting admin key %v", stdErr)
	}
	sc.StringData["userID"] = adminUser
	sc.StringData["userKey"] = adminKey
	sc.Namespace = cephCSINamespace
	_, err = c.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &sc, metav1.CreateOptions{})
	if err != nil {
		return err
	}

	err = updateSecretForEncryption(c)
	return err
}

type imageInfoFromPVC struct {
	imageID         string
	imageName       string
	csiVolumeHandle string
	pvName          string
}

// getImageInfoFromPVC reads volume handle of the bound PV to the passed in PVC,
// and returns imageInfoFromPVC or error.
func getImageInfoFromPVC(pvcNamespace, pvcName string, f *framework.Framework) (imageInfoFromPVC, error) {
	var imageData imageInfoFromPVC

	c := f.ClientSet.CoreV1()
	pvc, err := c.PersistentVolumeClaims(pvcNamespace).Get(context.TODO(), pvcName, metav1.GetOptions{})
	if err != nil {
		return imageData, err
	}

	pv, err := c.PersistentVolumes().Get(context.TODO(), pvc.Spec.VolumeName, metav1.GetOptions{})
	if err != nil {
		return imageData, err
	}

	imageIDRegex := regexp.MustCompile(`(\w+\-?){5}$`)
	imageID := imageIDRegex.FindString(pv.Spec.CSI.VolumeHandle)

	imageData = imageInfoFromPVC{
		imageID:         imageID,
		imageName:       fmt.Sprintf("csi-vol-%s", imageID),
		csiVolumeHandle: pv.Spec.CSI.VolumeHandle,
		pvName:          pv.Name,
	}
	return imageData, nil
}

func getImageMeta(rbdImageSpec, metaKey string, f *framework.Framework) (string, error) {
	cmd := fmt.Sprintf("rbd image-meta get %s %s", rbdImageSpec, metaKey)
	stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
	if err != nil {
		return "", err
	}
	if stdErr != "" {
		return strings.TrimSpace(stdOut), fmt.Errorf(stdErr)
	}
	return strings.TrimSpace(stdOut), nil
}

func validateEncryptedPVCAndAppBinding(pvcPath, appPath, kms string, f *framework.Framework) error {
	pvc, app, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout)
	if err != nil {
		return err
	}
	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
	if err != nil {
		return err
	}
	rbdImageSpec := imageSpec(defaultRBDPool, imageData.imageName)
	encryptedState, err := getImageMeta(rbdImageSpec, ".rbd.csi.ceph.com/encrypted", f)
	if err != nil {
		return err
	}
	if encryptedState != "encrypted" {
		return fmt.Errorf("%v not equal to encrypted", encryptedState)
	}

	volumeMountPath := app.Spec.Containers[0].VolumeMounts[0].MountPath
	mountType, err := getMountType(app.Name, app.Namespace, volumeMountPath, f)
	if err != nil {
		return err
	}
	if mountType != "crypt" {
		return fmt.Errorf("%v not equal to crypt", mountType)
	}

	if kms == "vault" {
		// check new passphrase created
		_, stdErr := readVaultSecret(imageData.csiVolumeHandle, f)
		if stdErr != "" {
			return fmt.Errorf("failed to read passphrase from vault: %s", stdErr)
		}
	}

	err = deletePVCAndApp("", f, pvc, app)
	if err != nil {
		return err
	}

	if kms == "vault" {
		// check new passphrase created
		stdOut, _ := readVaultSecret(imageData.csiVolumeHandle, f)
		if stdOut != "" {
			return fmt.Errorf("passphrase found in vault while should be deleted: %s", stdOut)
		}
	}
	return nil
}

func listRBDImages(f *framework.Framework) ([]string, error) {
	var imgInfos []string

	stdout, stdErr, err := execCommandInToolBoxPod(f,
		fmt.Sprintf("rbd ls --format=json %s", rbdOptions(defaultRBDPool)), rookNamespace)
	if err != nil {
		return imgInfos, err
	}
	if stdErr != "" {
		return imgInfos, fmt.Errorf("failed to list images %v", stdErr)
	}

	err = json.Unmarshal([]byte(stdout), &imgInfos)
	if err != nil {
		return imgInfos, err
	}
	return imgInfos, nil
}

func deleteBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeClaim) error {
	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
	if err != nil {
		return err
	}

	cmd := fmt.Sprintf("rbd rm %s %s", rbdOptions(defaultRBDPool), imageData.imageName)
	_, _, err = execCommandInToolBoxPod(f, cmd, rookNamespace)
	return err
}

func deletePool(name string, cephfs bool, f *framework.Framework) error {
	var cmds = []string{}
	if cephfs {
		// ceph fs fail
		// ceph fs rm myfs --yes-i-really-mean-it
		// ceph osd pool delete myfs-metadata myfs-metadata
		// --yes-i-really-mean-it
		// ceph osd pool delete myfs-data0 myfs-data0
		// --yes-i-really-mean-it
		cmds = append(cmds, fmt.Sprintf("ceph fs fail %s", name),
			fmt.Sprintf("ceph fs rm %s --yes-i-really-mean-it", name),
			fmt.Sprintf("ceph osd pool delete %s-metadata %s-metadata --yes-i-really-really-mean-it", name, name),
			fmt.Sprintf("ceph osd pool delete %s-data0 %s-data0 --yes-i-really-really-mean-it", name, name))
	} else {
		// ceph osd pool delete replicapool replicapool
		// --yes-i-really-mean-it
		cmds = append(cmds, fmt.Sprintf("ceph osd pool delete %s %s --yes-i-really-really-mean-it", name, name))
	}

	for _, cmd := range cmds {
		// discard stdErr as some commands prints warning in strErr
		_, _, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
		if err != nil {
			return err
		}
	}
	return nil
}

func getPVCImageInfoInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) (string, error) {
	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
	if err != nil {
		return "", err
	}

	stdOut, stdErr, err := execCommandInToolBoxPod(f,
		fmt.Sprintf("rbd info %s", imageSpec(pool, imageData.imageName)), rookNamespace)
	if err != nil {
		return "", err
	}
	if stdErr != "" {
		return "", fmt.Errorf("failed to get rbd info %v", stdErr)
	}

	if radosNamespace != "" {
		e2elog.Logf("found image %s in pool %s namespace %s", imageData.imageName, pool, radosNamespace)
	} else {
		e2elog.Logf("found image %s in pool %s", imageData.imageName, pool)
	}

	return stdOut, nil
}

func checkPVCImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error {
	_, err := getPVCImageInfoInPool(f, pvc, pool)

	return err
}

func checkPVCDataPoolForImageInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool, dataPool string) error {
	stdOut, err := getPVCImageInfoInPool(f, pvc, pool)
	if err != nil {
		return err
	}

	if !strings.Contains(stdOut, "data_pool: "+dataPool) {
		return fmt.Errorf("missing data pool value in image info, got info (%s)", stdOut)
	}

	return nil
}

func checkPVCImageJournalInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error {
	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
	if err != nil {
		return err
	}

	_, stdErr, err := execCommandInToolBoxPod(f,
		fmt.Sprintf("rados listomapkeys %s csi.volume.%s", rbdOptions(pool), imageData.imageID), rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("failed to listomapkeys %v", stdErr)
	}

	if radosNamespace != "" {
		e2elog.Logf("found image journal %s in pool %s namespace %s", "csi.volume."+imageData.imageID, pool, radosNamespace)
	} else {
		e2elog.Logf("found image journal %s in pool %s", "csi.volume."+imageData.imageID, pool)
	}

	return nil
}

func checkPVCCSIJournalInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim, pool string) error {
	imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
	if err != nil {
		return err
	}

	_, stdErr, err := execCommandInToolBoxPod(f,
		fmt.Sprintf("rados getomapval %s csi.volumes.default csi.volume.%s", rbdOptions(pool), imageData.pvName), rookNamespace)
	if err != nil {
		return err
	}
	if stdErr != "" {
		return fmt.Errorf("error getting fsid %v", stdErr)
	}

	if radosNamespace != "" {
		e2elog.Logf("found CSI journal entry %s in pool %s namespace %s", "csi.volume."+imageData.pvName, pool, radosNamespace)
	} else {
		e2elog.Logf("found CSI journal entry %s in pool %s", "csi.volume."+imageData.pvName, pool)
	}

	return nil
}
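A short sketch of how createRBDStorageClass is driven from tests; the nil/nil call matches the RBD upgrade test later in this commit, and the scOptions key is the one inspected by the helper above (the second call is illustrative only):

	// Default storage class, as used by the upgrade test.
	if err := createRBDStorageClass(f.ClientSet, f, nil, nil); err != nil {
		e2elog.Failf("failed to create storageclass with error %v", err)
	}
	// Storage class with late binding; "volumeBindingMode" is the option
	// the helper checks before setting WaitForFirstConsumer.
	scOptions := map[string]string{"volumeBindingMode": "WaitForFirstConsumer"}
	if err := createRBDStorageClass(f.ClientSet, f, scOptions, nil); err != nil {
		e2elog.Failf("failed to create storageclass with error %v", err)
	}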
@@ -161,8 +161,10 @@ func checkAppMntSize(f *framework.Framework, opt *metav1.ListOptions, size, cmd,

 	return wait.PollImmediate(poll, timeout, func() (bool, error) {
 		e2elog.Logf("executing cmd %s (%d seconds elapsed)", cmd, int(time.Since(start).Seconds()))
-		output, stdErr := execCommandInPod(f, cmd, ns, opt)
+		output, stdErr, err := execCommandInPod(f, cmd, ns, opt)
+		if err != nil {
+			return false, err
+		}
 		if stdErr != "" {
 			e2elog.Logf("failed to execute command in app pod %v", stdErr)
 			return false, nil
@@ -113,32 +113,46 @@ func deleteSnapshot(snap *snapapi.VolumeSnapshot, t int) error {
 	})
 }

-func createRBDSnapshotClass(f *framework.Framework) {
+func createRBDSnapshotClass(f *framework.Framework) error {
 	scPath := fmt.Sprintf("%s/%s", rbdExamplePath, "snapshotclass.yaml")
 	sc := getSnapshotClass(scPath)

 	sc.Parameters["csi.storage.k8s.io/snapshotter-secret-namespace"] = cephCSINamespace

-	fsID, stdErr := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
-	Expect(stdErr).Should(BeEmpty())
+	fsID, stdErr, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
+	if err != nil {
+		return err
+	}
+	if stdErr != "" {
+		return fmt.Errorf("failed to get fsid from ceph cluster %s", stdErr)
+	}
 	fsID = strings.Trim(fsID, "\n")
 	sc.Parameters["clusterID"] = fsID
 	sclient, err := newSnapshotClient()
-	Expect(err).Should(BeNil())
+	if err != nil {
+		return err
+	}
 	_, err = sclient.SnapshotV1beta1().VolumeSnapshotClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
-	Expect(err).Should(BeNil())
+	return err
 }

-func createCephFSSnapshotClass(f *framework.Framework) {
+func createCephFSSnapshotClass(f *framework.Framework) error {
 	scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "snapshotclass.yaml")
 	sc := getSnapshotClass(scPath)
 	sc.Parameters["csi.storage.k8s.io/snapshotter-secret-namespace"] = cephCSINamespace
-	fsID, stdErr := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
-	Expect(stdErr).Should(BeEmpty())
+	fsID, stdErr, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
+	if err != nil {
+		return err
+	}
+	if stdErr != "" {
+		return fmt.Errorf("failed to get fsid from ceph cluster %s", stdErr)
+	}
 	fsID = strings.Trim(fsID, "\n")
 	sc.Parameters["clusterID"] = fsID
 	sclient, err := newSnapshotClient()
-	Expect(err).Should(BeNil())
+	if err != nil {
+		return err
+	}
 	_, err = sclient.SnapshotV1beta1().VolumeSnapshotClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
-	Expect(err).Should(BeNil())
+	return err
 }
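With the snapshot-class helpers now returning an error instead of calling Expect, call sites are expected to check the result themselves; a sketch of that pattern, consistent with how the other refactored helpers in this commit are consumed:

	// Hypothetical call site after the signature change.
	err := createRBDSnapshotClass(f)
	if err != nil {
		e2elog.Failf("failed to create snapshotclass with error %v", err)
	}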
@@ -91,7 +91,10 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) error {

 	c := f.ClientSet

-	fsID, e := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
+	fsID, e, err := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to get fsid from ceph cluster %s", e)
 	}
@@ -101,7 +104,10 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) error {
 	// create rbd image
 	cmd := fmt.Sprintf("rbd create %s --size=%d --image-feature=layering %s", rbdImageName, 4096, rbdOptions(defaultRBDPool))

-	_, e = execCommandInToolBoxPod(f, cmd, rookNamespace)
+	_, e, err = execCommandInToolBoxPod(f, cmd, rookNamespace)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to create rbd image %s", e)
 	}
@@ -115,7 +121,7 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) error {

 	pv := getStaticPV(pvName, rbdImageName, size, "csi-rbd-secret", cephCSINamespace, sc, "rbd.csi.ceph.com", isBlock, opt)

-	_, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
+	_, err = c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{})
 	if err != nil {
 		return fmt.Errorf("PV Create API error: %w", err)
 	}
@@ -155,10 +161,11 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) error {
 	}

 	cmd = fmt.Sprintf("rbd rm %s %s", rbdImageName, rbdOptions(defaultRBDPool))
-	execCommandInToolBoxPod(f, cmd, rookNamespace)
-	return nil
+	_, _, err = execCommandInToolBoxPod(f, cmd, rookNamespace)
+	return err
 }

+// nolint:gocyclo // reduce complexity
 func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) error {
 	opt := make(map[string]string)
 	var (
@@ -180,7 +187,10 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) error {
 		LabelSelector: "app=rook-ceph-tools",
 	}

-	fsID, e := execCommandInPod(f, "ceph fsid", rookNamespace, &listOpt)
+	fsID, e, err := execCommandInPod(f, "ceph fsid", rookNamespace, &listOpt)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to get fsid from ceph cluster %s", e)
 	}
@@ -193,21 +203,30 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) error {
 	// create subvolumegroup, command will work even if group is already present.
 	cmd := fmt.Sprintf("ceph fs subvolumegroup create %s %s", fsName, groupName)

-	_, e = execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	_, e, err = execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to create subvolumegroup %s", e)
 	}

 	// create subvolume
 	cmd = fmt.Sprintf("ceph fs subvolume create %s %s %s --size %s", fsName, cephFsVolName, groupName, size)
-	_, e = execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	_, e, err = execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to create subvolume %s", e)
 	}

 	// get rootpath
 	cmd = fmt.Sprintf("ceph fs subvolume getpath %s %s %s", fsName, cephFsVolName, groupName)
-	rootPath, e := execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	rootPath, e, err := execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to get rootpath %s", e)
 	}
@@ -215,17 +234,22 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) error {
 	rootPath = strings.Trim(rootPath, "\n")

 	// create secret
-	userID := "admin" // nolint
-	secret := getSecret(scPath)
-	adminKey, e := execCommandInPod(f, "ceph auth get-key client.admin", rookNamespace, &listOpt)
+	secret, err := getSecret(scPath)
+	if err != nil {
+		return err
+	}
+	adminKey, e, err := execCommandInPod(f, "ceph auth get-key client.admin", rookNamespace, &listOpt)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to get adminKey %s", e)
 	}
-	secret.StringData["userID"] = userID
+	secret.StringData["userID"] = adminUser
 	secret.StringData["userKey"] = adminKey
 	secret.Name = secretName
 	secret.Namespace = cephCSINamespace
-	_, err := c.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &secret, metav1.CreateOptions{})
+	_, err = c.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &secret, metav1.CreateOptions{})
 	if err != nil {
 		return fmt.Errorf("failed to create secret, error %w", err)
 	}
@@ -280,14 +304,20 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) error {

 	// delete subvolume
 	cmd = fmt.Sprintf("ceph fs subvolume rm %s %s %s", fsName, cephFsVolName, groupName)
-	_, e = execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	_, e, err = execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to remove sub-volume %s", e)
 	}

 	// delete subvolume group
 	cmd = fmt.Sprintf("ceph fs subvolumegroup rm %s %s", fsName, groupName)
-	_, e = execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	_, e, err = execCommandInPod(f, cmd, rookNamespace, &listOpt)
+	if err != nil {
+		return err
+	}
 	if e != "" {
 		return fmt.Errorf("failed to remove subvolume group %s", e)
 	}
@@ -22,6 +22,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
 		app *v1.Pod
 		// cwd stores the initial working directory.
 		cwd string
+		err error
 	)
 	// deploy cephfs CSI
 	BeforeEach(func() {
@@ -30,26 +31,34 @@ var _ = Describe("CephFS Upgrade Testing", func() {
 		}
 		c = f.ClientSet
 		if cephCSINamespace != defaultNs {
-			err := createNamespace(c, cephCSINamespace)
+			err = createNamespace(c, cephCSINamespace)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to create namespace with error %v", err)
 			}
 		}

 		// fetch current working directory to switch back
 		// when we are done upgrading.
-		var err error
 		cwd, err = os.Getwd()
 		if err != nil {
-			Fail(err.Error())
+			e2elog.Failf("failed to getwd with error %v", err)
 		}
 		err = upgradeAndDeployCSI(upgradeVersion, "cephfs")
 		if err != nil {
-			Fail(err.Error())
+			e2elog.Failf("failed to upgrade csi with error %v", err)
+		}
+		err = createConfigMap(cephfsDirPath, f.ClientSet, f)
+		if err != nil {
+			e2elog.Failf("failed to create configmap with error %v", err)
+		}
+		err = createCephfsSecret(f.ClientSet, f)
+		if err != nil {
+			e2elog.Failf("failed to create secret with error %v", err)
+		}
+		err = createCephfsStorageClass(f.ClientSet, f, true, "")
+		if err != nil {
+			e2elog.Failf("failed to create storageclass with error %v", err)
 		}
-		createConfigMap(cephfsDirPath, f.ClientSet, f)
-		createCephfsSecret(f.ClientSet, f)
-		createCephfsStorageClass(f.ClientSet, f, true, "")
 	})
 	AfterEach(func() {
 		if !testCephFS || !upgradeTesting {
@@ -63,15 +72,26 @@ var _ = Describe("CephFS Upgrade Testing", func() {
 			// log node plugin
 			logsCSIPods("app=csi-cephfsplugin", c)
 		}
-		deleteConfigMap(cephfsDirPath)
-		deleteResource(cephfsExamplePath + "secret.yaml")
-		deleteResource(cephfsExamplePath + "storageclass.yaml")
+		err = deleteConfigMap(cephfsDirPath)
+		if err != nil {
+			e2elog.Failf("failed to delete configmap with error %v", err)
+		}
+		err = deleteResource(cephfsExamplePath + "secret.yaml")
+		if err != nil {
+			e2elog.Failf("failed to delete secret with error %v", err)
+		}
+		err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+		if err != nil {
+			e2elog.Failf("failed to delete storageclass with error %v", err)
+		}
 		if deployCephFS {
 			deleteCephfsPlugin()
 			if cephCSINamespace != defaultNs {
 				err := deleteNamespace(c, cephCSINamespace)
 				if err != nil {
-					Fail(err.Error())
+					if err != nil {
+						e2elog.Failf("failed to delete namespace with error %v", err)
+					}
 				}
 			}
 		}
@@ -83,13 +103,13 @@ var _ = Describe("CephFS Upgrade Testing", func() {
 			By("checking provisioner deployment is running")
 			err := waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("timeout waiting for deployment %s with error %v", cephfsDeploymentName, err)
 			}

-			By("checking nodeplugin deamonsets is running")
+			By("checking nodeplugin deamonset pods are running")
 			err = waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("timeout waiting for daemonset %s with error%v", cephfsDeamonSetName, err)
 			}

 			By("upgrade to latest changes and verify app re-mount", func() {
@@ -100,32 +120,31 @@ var _ = Describe("CephFS Upgrade Testing", func() {

 				pvc, err = loadPVC(pvcPath)
 				if pvc == nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to load pvc with error %v", err)
 				}
 				pvc.Namespace = f.UniqueName
-				e2elog.Logf("The PVC template %+v", pvc)

 				app, err = loadApp(appPath)
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to load application with error %v", err)
 				}
 				app.Namespace = f.UniqueName
 				app.Labels = map[string]string{"app": "upgrade-testing"}
 				pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
 				err = createPVCAndApp("", f, pvc, app, deployTimeout)
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to create pvc and application with error %v", err)
 				}
 				err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to delete application with error %v", err)
 				}
 				deleteCephfsPlugin()

 				// switch back to current changes.
 				err = os.Chdir(cwd)
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to d chdir with error %v", err)
 				}
 				deployCephfsPlugin()

@@ -134,7 +153,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
 				// an earlier release.
 				err = createApp(f.ClientSet, app, deployTimeout)
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to create application with error %v", err)
 				}
 			})

@@ -144,7 +163,6 @@ var _ = Describe("CephFS Upgrade Testing", func() {
 				v, err = f.ClientSet.Discovery().ServerVersion()
 				if err != nil {
 					e2elog.Logf("failed to get server version with error %v", err)
-					Fail(err.Error())
 				}
 				// Resize 0.3.0 is only supported from v1.15+
 				if v.Major > "1" || (v.Major == "1" && v.Minor >= "15") {
@@ -153,23 +171,23 @@ var _ = Describe("CephFS Upgrade Testing", func() {
 					}
 					pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
 					if err != nil {
-						Fail(err.Error())
+						e2elog.Failf("failed to get pvc with error %v", err)
 					}

 					// resize PVC
 					err = expandPVCSize(f.ClientSet, pvc, pvcExpandSize, deployTimeout)
 					if err != nil {
-						Fail(err.Error())
+						e2elog.Failf("failed to expand pvc with error %v", err)
 					}
 					// wait for application pod to come up after resize
 					err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout)
 					if err != nil {
-						Fail(err.Error())
+						e2elog.Failf("timout waiting for pod to be in running state with error %v", err)
 					}
 					// validate if resize is successful.
 					err = checkDirSize(app, f, &opt, pvcExpandSize)
 					if err != nil {
-						Fail(err.Error())
+						e2elog.Failf("failed to check directory size with error %v", err)
 					}
 				}

@@ -178,7 +196,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
 				By("delete pvc and app")
 				err = deletePVCAndApp("", f, pvc, app)
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to delete pvc and application with error %v", err)
 				}
 			})
 		})
@@ -32,28 +32,44 @@ var _ = Describe("RBD Upgrade Testing", func() {
 		if cephCSINamespace != defaultNs {
 			err := createNamespace(c, cephCSINamespace)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to create namespace with error %v", err)
 			}
 		}
-		createNodeLabel(f, nodeRegionLabel, regionValue)
-		createNodeLabel(f, nodeZoneLabel, zoneValue)
 
 		// fetch current working directory to switch back
 		// when we are done upgrading.
 		var err error
 		cwd, err = os.Getwd()
 		if err != nil {
-			Fail(err.Error())
+			e2elog.Failf("failed to do getwd with error %v", err)
 		}
 
 		deployVault(f.ClientSet, deployTimeout)
 		err = upgradeAndDeployCSI(upgradeVersion, "rbd")
 		if err != nil {
-			Fail(err.Error())
+			e2elog.Failf("failed to upgrade and deploy CSI with error %v", err)
+		}
+		err = createConfigMap(rbdDirPath, f.ClientSet, f)
+		if err != nil {
+			e2elog.Failf("failed to create configmap with error %v", err)
+		}
+		err = createRBDStorageClass(f.ClientSet, f, nil, nil)
+		if err != nil {
+			e2elog.Failf("failed to create storageclass with error %v", err)
+		}
+		err = createRBDSecret(f.ClientSet, f)
+		if err != nil {
+			e2elog.Failf("failed to create secret with error %v", err)
+		}
+
+		err = createNodeLabel(f, nodeRegionLabel, regionValue)
+		if err != nil {
+			e2elog.Failf("failed to create node label with error %v", err)
+		}
+		err = createNodeLabel(f, nodeZoneLabel, zoneValue)
+		if err != nil {
+			e2elog.Failf("failed to create node label with error %v", err)
 		}
-		createConfigMap(rbdDirPath, f.ClientSet, f)
-		createRBDStorageClass(f.ClientSet, f, nil, nil)
-		createRBDSecret(f.ClientSet, f)
 	})
 	AfterEach(func() {
 		if !testRBD || !upgradeTesting {
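Besides the message change, the reworked BeforeEach above relies on setup helpers (createConfigMap, createRBDStorageClass, createRBDSecret, createNodeLabel) that now return an error instead of failing internally, so the caller reports a step-specific failure. The sketch below only illustrates that calling convention; the step type, runSetup, and exampleBeforeEach are hypothetical names that do not exist in this commit.

package e2e

import e2elog "k8s.io/kubernetes/test/e2e/framework/log"

// step pairs a human-readable action with a helper that returns an error.
type step struct {
	name string
	run  func() error
}

// runSetup fails the suite with an action-specific message on the first error,
// mirroring the straight-line err checks in the BeforeEach above.
func runSetup(steps []step) {
	for _, s := range steps {
		if err := s.run(); err != nil {
			e2elog.Failf("failed to %s with error %v", s.name, err)
		}
	}
}

// Example wiring; the function literals stand in for the real helpers.
func exampleBeforeEach() {
	runSetup([]step{
		{name: "create configmap", run: func() error { return nil }},
		{name: "create storageclass", run: func() error { return nil }},
		{name: "create secret", run: func() error { return nil }},
	})
}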
@@ -68,21 +84,36 @@ var _ = Describe("RBD Upgrade Testing", func() {
 			logsCSIPods("app=csi-rbdplugin", c)
 		}
 
-		deleteConfigMap(rbdDirPath)
-		deleteResource(rbdExamplePath + "secret.yaml")
-		deleteResource(rbdExamplePath + "storageclass.yaml")
+		err := deleteConfigMap(rbdDirPath)
+		if err != nil {
+			e2elog.Failf("failed to delete configmap with error %v", err)
+		}
+		err = deleteResource(rbdExamplePath + "secret.yaml")
+		if err != nil {
+			e2elog.Failf("failed to delete secret with error %v", err)
+		}
+		err = deleteResource(rbdExamplePath + "storageclass.yaml")
+		if err != nil {
+			e2elog.Failf("failed to delete storageclass with error %v", err)
+		}
 		deleteVault()
 		if deployRBD {
 			deleteRBDPlugin()
 			if cephCSINamespace != defaultNs {
-				err := deleteNamespace(c, cephCSINamespace)
+				err = deleteNamespace(c, cephCSINamespace)
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to delete namespace with error %v", err)
 				}
 			}
 		}
-		deleteNodeLabel(c, nodeRegionLabel)
-		deleteNodeLabel(c, nodeZoneLabel)
+		err = deleteNodeLabel(c, nodeRegionLabel)
+		if err != nil {
+			e2elog.Failf("failed to delete node label with error %v", err)
+		}
+		err = deleteNodeLabel(c, nodeZoneLabel)
+		if err != nil {
+			e2elog.Failf("failed to delete node label with error %v", err)
+		}
 	})
 
 	Context("Test RBD CSI", func() {
@@ -93,14 +124,14 @@ var _ = Describe("RBD Upgrade Testing", func() {
 		By("checking provisioner deployment is running", func() {
 			err := waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("timeout waiting for deployment %s with error %v", rbdDeploymentName, err)
 			}
 		})
 
-		By("checking nodeplugin deamonsets is running", func() {
+		By("checking nodeplugin deamonset pods are running", func() {
 			err := waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("timeout waiting for daemonset %s with error %v", rbdDaemonsetName, err)
 			}
 		})
 
@@ -110,31 +141,30 @@ var _ = Describe("RBD Upgrade Testing", func() {
 			var err error
 			pvc, err = loadPVC(pvcPath)
 			if pvc == nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to load pvc with error %v", err)
 			}
 			pvc.Namespace = f.UniqueName
-			e2elog.Logf("The PVC template %+v", pvc)
 
 			app, err = loadApp(appPath)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to load application with error %v", err)
 			}
 			app.Namespace = f.UniqueName
 			app.Labels = map[string]string{"app": "upgrade-testing"}
 			pvc.Spec.Resources.Requests[v1.ResourceStorage] = resource.MustParse(pvcSize)
 			err = createPVCAndApp("", f, pvc, app, deployTimeout)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to create pvc with error %v", err)
 			}
 			err = deletePod(app.Name, app.Namespace, f.ClientSet, deployTimeout)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to delete application with error %v", err)
 			}
 			deleteRBDPlugin()
 
 			err = os.Chdir(cwd)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to change directory with error %v", err)
 			}
 
 			deployRBDPlugin()
@@ -143,7 +173,7 @@ var _ = Describe("RBD Upgrade Testing", func() {
 			app.Labels = map[string]string{"app": "upgrade-testing"}
 			err = createApp(f.ClientSet, app, deployTimeout)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to create application with error %v", err)
 			}
 		})
 
@@ -153,7 +183,6 @@ var _ = Describe("RBD Upgrade Testing", func() {
 			v, err := f.ClientSet.Discovery().ServerVersion()
 			if err != nil {
 				e2elog.Logf("failed to get server version with error %v", err)
-				Fail(err.Error())
 			}
 			// Resize 0.3.0 is only supported from v1.15+
 			if v.Major > "1" || (v.Major == "1" && v.Minor >= "15") {
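Both upgrade suites gate the resize test on the server version reported by the discovery client, as in the hunk above. A sketch of that gate follows; it assumes a client-go kubernetes.Interface and the same e2elog package, and supportsResize is an illustrative name rather than a helper from this commit.

package e2e

import (
	"k8s.io/client-go/kubernetes"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// supportsResize mirrors the v1.15+ gate used before the expand-PVC steps.
func supportsResize(c kubernetes.Interface) bool {
	v, err := c.Discovery().ServerVersion()
	if err != nil {
		e2elog.Logf("failed to get server version with error %v", err)
		return false
	}
	// Major and Minor are strings, so this comparison is lexicographic, exactly
	// as in the tests above; a Minor such as "9" would also satisfy it, so it is
	// a simplification rather than a strict numeric check.
	return v.Major > "1" || (v.Major == "1" && v.Minor >= "15")
}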
@@ -162,23 +191,23 @@ var _ = Describe("RBD Upgrade Testing", func() {
 				}
 				pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(pvc.Namespace).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to get pvc with error %v", err)
 				}
 
 				// resize PVC
 				err = expandPVCSize(f.ClientSet, pvc, pvcExpandSize, deployTimeout)
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to expand pvc with error %v", err)
 				}
 				// wait for application pod to come up after resize
 				err = waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout)
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("timeout waiting for pod to be in running state with error %v", err)
 				}
 				// validate if resize is successful.
 				err = checkDirSize(app, f, &opt, pvcExpandSize)
 				if err != nil {
-					Fail(err.Error())
+					e2elog.Failf("failed to check directory size with error %v", err)
 				}
 			}
 
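After the gate, the resize block chains three of this repository's e2e helpers: expand the claim, wait for the application pod to come back, then verify the mounted directory grew. The wrapper below is hypothetical and only shows how those calls fit together; expandPVCSize, waitForPodInRunningState, checkDirSize and deployTimeout are assumed to keep the signatures visible at the call sites above, so the snippet would only compile inside this repository's e2e package.

package e2e

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/test/e2e/framework"
	e2elog "k8s.io/kubernetes/test/e2e/framework/log"
)

// validateExpand is an illustrative wrapper around the post-upgrade resize
// sequence shared by the CephFS and RBD upgrade suites.
func validateExpand(f *framework.Framework, pvc *v1.PersistentVolumeClaim, app *v1.Pod, opt *metav1.ListOptions, size string) {
	if err := expandPVCSize(f.ClientSet, pvc, size, deployTimeout); err != nil {
		e2elog.Failf("failed to expand pvc with error %v", err)
	}
	if err := waitForPodInRunningState(app.Name, app.Namespace, f.ClientSet, deployTimeout); err != nil {
		e2elog.Failf("timeout waiting for pod to be in running state with error %v", err)
	}
	if err := checkDirSize(app, f, opt, size); err != nil {
		e2elog.Failf("failed to check directory size with error %v", err)
	}
}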
@@ -187,7 +216,7 @@ var _ = Describe("RBD Upgrade Testing", func() {
 		By("delete pvc and app", func() {
 			err := deletePVCAndApp("", f, pvc, app)
 			if err != nil {
-				Fail(err.Error())
+				e2elog.Failf("failed to delete pvc and application with error %v", err)
 			}
 		})
 	})
1018	e2e/utils.go
File diff suppressed because it is too large