cleanup: rework on naming conventions
This commit replaces cephfs -> cephFS to maintain consistency
throughout the codebase.

Updates: #1465

Signed-off-by: Yati Padia <ypadia@redhat.com>
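As a quick illustration (an editorial sketch, not a file from this repository; identifiers and values are taken from the diff below), the rename follows Go's convention that initialisms keep a consistent case:

```go
// Before: the FS initialism was lower-cased in identifiers.
var cephfsDirPath = "../deploy/cephfs/kubernetes/"

// After: FS is upper-cased (cephFS), in line with Go naming conventions
// for initialisms; on-disk paths and YAML file names are unchanged.
var cephFSDirPath = "../deploy/cephfs/kubernetes/"
```

Note that only Go identifiers change; user-facing strings such as flag names (`deploy-cephfs`, `test-cephfs`) and deploy paths keep their lowercase spelling.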
@@ -3,6 +3,7 @@
 - [End-to-End Testing](#end-to-end-testing)
 - [Introduction](#introduction)
 - [Install Kubernetes](#install-kubernetes)
 - [Deploy Rook](#deploy-rook)
 - [Test parameters](#test-parameters)
 - [E2E for snapshot](#e2e-for-snapshot)
 - [Running E2E](#running-e2e)
@@ -89,9 +90,9 @@ are available while running tests:
 | flag | description |
 | ----------------- | ----------------------------------------------------------------------------- |
 | deploy-timeout | Timeout to wait for created kubernetes resources (default: 10 minutes) |
-| deploy-cephfs | Deploy cephfs csi driver as part of E2E (default: true) |
+| deploy-cephfs | Deploy cephFS csi driver as part of E2E (default: true) |
 | deploy-rbd | Deploy rbd csi driver as part of E2E (default: true) |
-| test-cephfs | Test cephfs csi driver as part of E2E (default: true) |
+| test-cephfs | Test cephFS csi driver as part of E2E (default: true) |
 | upgrade-testing | Perform upgrade testing (default: false) |
 | upgrade-version | Target version for upgrade testing (default: "v3.3.1") |
 | test-rbd | Test rbd csi driver as part of E2E (default: true) |
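These flags are registered with Go's standard `flag` package in the `func init()` hunk further down this diff, so they can be passed straight to the E2E test invocation, for example `go test ./e2e -deploy-cephfs=false -test-cephfs=true -deploy-timeout=10` (the invocation shape is an assumption for illustration, not taken from this document).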

e2e/cephfs.go
@@ -18,17 +18,17 @@ import (
 )

 var (
-    cephfsProvisioner     = "csi-cephfsplugin-provisioner.yaml"
-    cephfsProvisionerRBAC = "csi-provisioner-rbac.yaml"
-    cephfsProvisionerPSP  = "csi-provisioner-psp.yaml"
-    cephfsNodePlugin      = "csi-cephfsplugin.yaml"
-    cephfsNodePluginRBAC  = "csi-nodeplugin-rbac.yaml"
-    cephfsNodePluginPSP   = "csi-nodeplugin-psp.yaml"
-    cephfsDeploymentName  = "csi-cephfsplugin-provisioner"
-    cephfsDeamonSetName   = "csi-cephfsplugin"
-    cephfsContainerName   = "csi-cephfsplugin"
-    cephfsDirPath         = "../deploy/cephfs/kubernetes/"
-    cephfsExamplePath     = examplePath + "cephfs/"
+    cephFSProvisioner     = "csi-cephfsplugin-provisioner.yaml"
+    cephFSProvisionerRBAC = "csi-provisioner-rbac.yaml"
+    cephFSProvisionerPSP  = "csi-provisioner-psp.yaml"
+    cephFSNodePlugin      = "csi-cephfsplugin.yaml"
+    cephFSNodePluginRBAC  = "csi-nodeplugin-rbac.yaml"
+    cephFSNodePluginPSP   = "csi-nodeplugin-psp.yaml"
+    cephFSDeploymentName  = "csi-cephfsplugin-provisioner"
+    cephFSDeamonSetName   = "csi-cephfsplugin"
+    cephFSContainerName   = "csi-cephfsplugin"
+    cephFSDirPath         = "../deploy/cephfs/kubernetes/"
+    cephFSExamplePath     = examplePath + "cephfs/"
     subvolumegroup        = "e2e"
     fileSystemName        = "myfs"
 )
@@ -36,23 +36,23 @@ var (
 func deployCephfsPlugin() {
     // delete objects deployed by rook

-    data, err := replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerRBAC)
+    data, err := replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisionerRBAC)
     if err != nil {
-        e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, err)
+        e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisionerRBAC, err)
     }
     _, err = framework.RunKubectlInput(cephCSINamespace, data, "--ignore-not-found=true", ns, "delete", "-f", "-")
     if err != nil {
-        e2elog.Failf("failed to delete provisioner rbac %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, err)
+        e2elog.Failf("failed to delete provisioner rbac %s with error %v", cephFSDirPath+cephFSProvisionerRBAC, err)
     }

-    data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginRBAC)
+    data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePluginRBAC)
     if err != nil {
-        e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err)
+        e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePluginRBAC, err)
     }
     _, err = framework.RunKubectlInput(cephCSINamespace, data, "delete", "--ignore-not-found=true", ns, "-f", "-")

     if err != nil {
-        e2elog.Failf("failed to delete nodeplugin rbac %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err)
+        e2elog.Failf("failed to delete nodeplugin rbac %s with error %v", cephFSDirPath+cephFSNodePluginRBAC, err)
     }

     createORDeleteCephfsResources(kubectlCreate)
@@ -63,12 +63,12 @@ func deleteCephfsPlugin() {
 }

 func createORDeleteCephfsResources(action kubectlAction) {
-    csiDriver, err := ioutil.ReadFile(cephfsDirPath + csiDriverObject)
+    csiDriver, err := ioutil.ReadFile(cephFSDirPath + csiDriverObject)
     if err != nil {
         // createORDeleteRbdResources is used for upgrade testing as csidriverObject is
         // newly added, discarding file not found error.
         if !os.IsNotExist(err) {
-            e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+csiDriverObject, err)
+            e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+csiDriverObject, err)
         }
     } else {
         err = retryKubectlInput(cephCSINamespace, action, string(csiDriver), deployTimeout)
@@ -89,55 +89,55 @@ func createORDeleteCephfsResources(action kubectlAction) {
             e2elog.Failf("failed to %s ceph-conf configmap object with error %v", action, err)
         }
     }
-    data, err := replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisioner)
+    data, err := replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisioner)
     if err != nil {
-        e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisioner, err)
+        e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisioner, err)
     }
     data = oneReplicaDeployYaml(data)
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
         e2elog.Failf("failed to %s CephFS provisioner with error %v", action, err)
     }
-    data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerRBAC)
+    data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisionerRBAC)

     if err != nil {
-        e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, err)
+        e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisionerRBAC, err)
     }
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
         e2elog.Failf("failed to %s CephFS provisioner rbac with error %v", action, err)
     }

-    data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerPSP)
+    data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisionerPSP)
     if err != nil {
-        e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerPSP, err)
+        e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisionerPSP, err)
     }
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
         e2elog.Failf("failed to %s CephFS provisioner psp with error %v", action, err)
     }

-    data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePlugin)
+    data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePlugin)
     if err != nil {
-        e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePlugin, err)
+        e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePlugin, err)
     }
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
         e2elog.Failf("failed to %s CephFS nodeplugin with error %v", action, err)
     }

-    data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginRBAC)
+    data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePluginRBAC)
     if err != nil {
-        e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err)
+        e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePluginRBAC, err)
     }
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
         e2elog.Failf("failed to %s CephFS nodeplugin rbac with error %v", action, err)
     }

-    data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginPSP)
+    data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePluginPSP)
     if err != nil {
-        e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginPSP, err)
+        e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePluginPSP, err)
     }
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
@@ -200,7 +200,7 @@ var _ = Describe("cephfs", func() {
            }
            deployCephfsPlugin()
        }
-       err := createConfigMap(cephfsDirPath, f.ClientSet, f)
+       err := createConfigMap(cephFSDirPath, f.ClientSet, f)
        if err != nil {
            e2elog.Failf("failed to create configmap with error %v", err)
        }
@@ -239,7 +239,7 @@ var _ = Describe("cephfs", func() {
            // log all details from the namespace where Ceph-CSI is deployed
            framework.DumpAllNamespaceInfo(c, cephCSINamespace)
        }
-       err := deleteConfigMap(cephfsDirPath)
+       err := deleteConfigMap(cephFSDirPath)
        if err != nil {
            e2elog.Failf("failed to delete configmap with error %v", err)
        }
@@ -255,7 +255,7 @@ var _ = Describe("cephfs", func() {
        if err != nil {
            e2elog.Failf("failed to delete node secret with error %v", err)
        }
-       err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+       err = deleteResource(cephFSExamplePath + "storageclass.yaml")
        if err != nil {
            e2elog.Failf("failed to delete storageclass with error %v", err)
        }
@@ -272,25 +272,25 @@ var _ = Describe("cephfs", func() {

    Context("Test CephFS CSI", func() {
        It("Test CephFS CSI", func() {
-           pvcPath := cephfsExamplePath + "pvc.yaml"
-           appPath := cephfsExamplePath + "pod.yaml"
-           pvcClonePath := cephfsExamplePath + "pvc-restore.yaml"
-           pvcSmartClonePath := cephfsExamplePath + "pvc-clone.yaml"
-           appClonePath := cephfsExamplePath + "pod-restore.yaml"
-           appSmartClonePath := cephfsExamplePath + "pod-clone.yaml"
-           snapshotPath := cephfsExamplePath + "snapshot.yaml"
+           pvcPath := cephFSExamplePath + "pvc.yaml"
+           appPath := cephFSExamplePath + "pod.yaml"
+           pvcClonePath := cephFSExamplePath + "pvc-restore.yaml"
+           pvcSmartClonePath := cephFSExamplePath + "pvc-clone.yaml"
+           appClonePath := cephFSExamplePath + "pod-restore.yaml"
+           appSmartClonePath := cephFSExamplePath + "pod-clone.yaml"
+           snapshotPath := cephFSExamplePath + "snapshot.yaml"

            By("checking provisioner deployment is running", func() {
-               err := waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
+               err := waitForDeploymentComplete(cephFSDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
                if err != nil {
-                   e2elog.Failf("timeout waiting for deployment %s with error %v", cephfsDeploymentName, err)
+                   e2elog.Failf("timeout waiting for deployment %s with error %v", cephFSDeploymentName, err)
                }
            })

            By("checking nodeplugin deamonset pods are running", func() {
-               err := waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
+               err := waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
                if err != nil {
-                   e2elog.Failf("timeout waiting for daemonset %s with error %v", cephfsDeamonSetName, err)
+                   e2elog.Failf("timeout waiting for daemonset %s with error %v", cephFSDeamonSetName, err)
                }
            })

@@ -302,11 +302,11 @@ var _ = Describe("cephfs", func() {
                    e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
                }
                // Deleting the storageclass and secret created by helm
-               err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+               err = deleteResource(cephFSExamplePath + "storageclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
                }
-               err = deleteResource(cephfsExamplePath + "secret.yaml")
+               err = deleteResource(cephFSExamplePath + "secret.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
                }
@@ -314,7 +314,7 @@ var _ = Describe("cephfs", func() {
            }

            By("check static PVC", func() {
-               scPath := cephfsExamplePath + "secret.yaml"
+               scPath := cephFSExamplePath + "secret.yaml"
                err := validateCephFsStaticPV(f, appPath, scPath)
                if err != nil {
                    e2elog.Failf("failed to validate CephFS static pv with error %v", err)
@@ -330,7 +330,7 @@ var _ = Describe("cephfs", func() {
                if err != nil {
                    e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
                }
-               err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+               err = deleteResource(cephFSExamplePath + "storageclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
                }
@@ -378,7 +378,7 @@ var _ = Describe("cephfs", func() {
                    e2elog.Failf("failed to delete PVC with error %v", err)
                }
                validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
-               err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+               err = deleteResource(cephFSExamplePath + "storageclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete storageclass with error %v", err)
                }
@@ -399,7 +399,7 @@ var _ = Describe("cephfs", func() {
                if err != nil {
                    e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
                }
-               err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+               err = deleteResource(cephFSExamplePath + "storageclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
                }
@@ -418,7 +418,7 @@ var _ = Describe("cephfs", func() {
                if err != nil {
                    e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
                }
-               err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+               err = deleteResource(cephFSExamplePath + "storageclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
                }
@@ -529,7 +529,7 @@ var _ = Describe("cephfs", func() {
            })

            By("validate multiple subvolumegroup creation", func() {
-               err := deleteResource(cephfsExamplePath + "storageclass.yaml")
+               err := deleteResource(cephFSExamplePath + "storageclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete storageclass with error %v", err)
                }
@@ -538,7 +538,7 @@ var _ = Describe("cephfs", func() {
                    "clusterID-1": "subvolgrp1",
                    "clusterID-2": "subvolgrp2",
                }
-               err = createCustomConfigMap(f.ClientSet, cephfsDirPath, subvolgrpInfo)
+               err = createCustomConfigMap(f.ClientSet, cephFSDirPath, subvolgrpInfo)
                if err != nil {
                    e2elog.Failf("failed to create configmap with error %v", err)
                }
@@ -553,7 +553,7 @@ var _ = Describe("cephfs", func() {
                if err != nil {
                    e2elog.Failf("failed to validate pvc and application with error %v", err)
                }
-               err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+               err = deleteResource(cephFSExamplePath + "storageclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete storageclass with error %v", err)
                }
@@ -574,7 +574,7 @@ var _ = Describe("cephfs", func() {
                if err != nil {
                    e2elog.Failf("failed to validate pvc and application with error %v", err)
                }
-               err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+               err = deleteResource(cephFSExamplePath + "storageclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete storageclass with error %v", err)
                }
@@ -582,11 +582,11 @@ var _ = Describe("cephfs", func() {
                if err != nil {
                    e2elog.Failf("failed to validate subvolume group with error %v", err)
                }
-               err = deleteConfigMap(cephfsDirPath)
+               err = deleteConfigMap(cephFSDirPath)
                if err != nil {
                    e2elog.Failf("failed to delete configmap with error %v", err)
                }
-               err = createConfigMap(cephfsDirPath, f.ClientSet, f)
+               err = createConfigMap(cephFSDirPath, f.ClientSet, f)
                if err != nil {
                    e2elog.Failf("failed to create configmap with error %v", err)
                }
@@ -703,7 +703,7 @@ var _ = Describe("cephfs", func() {
                    e2elog.Failf("failed to delete PVC with error %v", err)
                }

-               err = deleteResource(cephfsExamplePath + "snapshotclass.yaml")
+               err = deleteResource(cephFSExamplePath + "snapshotclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete CephFS snapshotclass with error %v", err)
                }
@@ -782,7 +782,7 @@ var _ = Describe("cephfs", func() {
                    e2elog.Failf("failed to delete snapshot (%s): %v", f.UniqueName, err)
                }

-               err = deleteResource(cephfsExamplePath + "snapshotclass.yaml")
+               err = deleteResource(cephFSExamplePath + "snapshotclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete CephFS snapshotclass with error %v", err)
                }

@@ -43,7 +43,7 @@ func createCephfsStorageClass(
    f *framework.Framework,
    enablePool bool,
    params map[string]string) error {
-   scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "storageclass.yaml")
+   scPath := fmt.Sprintf("%s/%s", cephFSExamplePath, "storageclass.yaml")
    sc, err := getStorageClass(scPath)
    if err != nil {
        return err
@@ -90,7 +90,7 @@ func createCephfsStorageClass(
 }

 func createCephfsSecret(f *framework.Framework, secretName, userName, userKey string) error {
-   scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "secret.yaml")
+   scPath := fmt.Sprintf("%s/%s", cephFSExamplePath, "secret.yaml")
    sc, err := getSecret(scPath)
    if err != nil {
        return err
@@ -131,9 +131,9 @@ func unmountCephFSVolume(f *framework.Framework, appName, pvcName string) error
    _, stdErr, err := execCommandInDaemonsetPod(
        f,
        cmd,
-       cephfsDeamonSetName,
+       cephFSDeamonSetName,
        pod.Spec.NodeName,
-       cephfsContainerName,
+       cephFSContainerName,
        cephCSINamespace)
    if stdErr != "" {
        e2elog.Logf("StdErr occurred: %s", stdErr)

@@ -18,9 +18,9 @@ func init() {
    log.SetOutput(GinkgoWriter)

    flag.IntVar(&deployTimeout, "deploy-timeout", 10, "timeout to wait for created kubernetes resources")
-   flag.BoolVar(&deployCephFS, "deploy-cephfs", true, "deploy cephfs csi driver")
+   flag.BoolVar(&deployCephFS, "deploy-cephfs", true, "deploy cephFS csi driver")
    flag.BoolVar(&deployRBD, "deploy-rbd", true, "deploy rbd csi driver")
-   flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephfs csi driver")
+   flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephFS csi driver")
    flag.BoolVar(&testRBD, "test-rbd", true, "test rbd csi driver")
    flag.BoolVar(&helmTest, "helm-test", false, "tests running on deployment via helm")
    flag.BoolVar(&upgradeTesting, "upgrade-testing", false, "perform upgrade testing")

@@ -649,9 +649,9 @@ func sparsifyBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeCla
        return err
    }
 }

-func deletePool(name string, cephfs bool, f *framework.Framework) error {
+func deletePool(name string, cephFS bool, f *framework.Framework) error {
    cmds := []string{}
-   if cephfs {
+   if cephFS {
        // ceph fs fail
        // ceph fs rm myfs --yes-i-really-mean-it
        // ceph osd pool delete myfs-metadata myfs-metadata
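As an editorial aside (not part of this commit): the comments above list the ceph CLI sequence deletePool drives when tearing down a CephFS filesystem. A minimal runnable sketch of how such a command list could be assembled; the function name and the RBD branch are illustrative assumptions, since the hunk is truncated here:

```go
package main

import "fmt"

// buildDeletePoolCmds sketches the ceph CLI sequence from the comments above:
// a CephFS filesystem must be failed and removed before its pools can be
// deleted. This is an assumed helper for illustration, not the repository's
// deletePool implementation.
func buildDeletePoolCmds(name string, cephFS bool) []string {
	cmds := []string{}
	if cephFS {
		cmds = append(cmds,
			fmt.Sprintf("ceph fs fail %s", name),
			fmt.Sprintf("ceph fs rm %s --yes-i-really-mean-it", name),
			fmt.Sprintf("ceph osd pool delete %s-metadata %s-metadata", name, name),
		)
	}
	// for a plain RBD pool (cephFS == false) only the pool itself is removed,
	// matching the deletePool(defaultRBDPool, cephFS, f) call in e2e/utils.go
	cmds = append(cmds, fmt.Sprintf("ceph osd pool delete %s %s", name, name))
	return cmds
}

func main() {
	fmt.Println(buildDeletePoolCmds("myfs", true))
}
```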

@@ -171,7 +171,7 @@ func deleteRBDSnapshotClass() {
 }

 func createCephFSSnapshotClass(f *framework.Framework) error {
-   scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "snapshotclass.yaml")
+   scPath := fmt.Sprintf("%s/%s", cephFSExamplePath, "snapshotclass.yaml")
    sc := getSnapshotClass(scPath)
    sc.Parameters["csi.storage.k8s.io/snapshotter-secret-namespace"] = cephCSINamespace
    sc.Parameters["csi.storage.k8s.io/snapshotter-secret-name"] = cephFSProvisionerSecretName

@@ -37,7 +37,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
        appKey   = "app"
        appLabel = "cephfs-upgrade-testing"
    )
-   // deploy cephfs CSI
+   // deploy cephFS CSI
    BeforeEach(func() {
        if !upgradeTesting || !testCephFS {
            Skip("Skipping CephFS Upgrade Test")
@@ -60,7 +60,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
        if err != nil {
            e2elog.Failf("failed to upgrade csi with error %v", err)
        }
-       err = createConfigMap(cephfsDirPath, f.ClientSet, f)
+       err = createConfigMap(cephFSDirPath, f.ClientSet, f)
        if err != nil {
            e2elog.Failf("failed to create configmap with error %v", err)
        }
@@ -108,7 +108,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
            // log all details from the namespace where Ceph-CSI is deployed
            framework.DumpAllNamespaceInfo(c, cephCSINamespace)
        }
-       err = deleteConfigMap(cephfsDirPath)
+       err = deleteConfigMap(cephFSDirPath)
        if err != nil {
            e2elog.Failf("failed to delete configmap with error %v", err)
        }
@@ -124,11 +124,11 @@ var _ = Describe("CephFS Upgrade Testing", func() {
        if err != nil {
            e2elog.Failf("failed to delete node secret with error %v", err)
        }
-       err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+       err = deleteResource(cephFSExamplePath + "storageclass.yaml")
        if err != nil {
            e2elog.Failf("failed to delete storageclass with error %v", err)
        }
-       err = deleteResource(cephfsExamplePath + "snapshotclass.yaml")
+       err = deleteResource(cephFSExamplePath + "snapshotclass.yaml")
        if err != nil {
            e2elog.Failf("failed to delete storageclass with error %v", err)
        }
@@ -148,22 +148,22 @@ var _ = Describe("CephFS Upgrade Testing", func() {
    Context("Cephfs Upgrade Test", func() {
        It("Cephfs Upgrade Test", func() {
            By("checking provisioner deployment is running", func() {
-               err = waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
+               err = waitForDeploymentComplete(cephFSDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
                if err != nil {
-                   e2elog.Failf("timeout waiting for deployment %s with error %v", cephfsDeploymentName, err)
+                   e2elog.Failf("timeout waiting for deployment %s with error %v", cephFSDeploymentName, err)
                }
            })
            By("checking nodeplugin deamonset pods are running", func() {
-               err = waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
+               err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
                if err != nil {
-                   e2elog.Failf("timeout waiting for daemonset %s with error%v", cephfsDeamonSetName, err)
+                   e2elog.Failf("timeout waiting for daemonset %s with error%v", cephFSDeamonSetName, err)
                }
            })

            By("upgrade to latest changes and verify app re-mount", func() {
                // TODO: fetch pvc size from spec.
-               pvcPath := cephfsExamplePath + "pvc.yaml"
-               appPath := cephfsExamplePath + "pod.yaml"
+               pvcPath := cephFSExamplePath + "pvc.yaml"
+               appPath := cephFSExamplePath + "pod.yaml"
                data := "check data persists"
                label := make(map[string]string)

@@ -218,7 +218,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
                // pvc clone is only supported from v1.16+
                if k8sVersionGreaterEquals(f.ClientSet, 1, 17) {
                    // Create snapshot of the pvc
-                   snapshotPath := cephfsExamplePath + "snapshot.yaml"
+                   snapshotPath := cephFSExamplePath + "snapshot.yaml"
                    snap := getSnapshot(snapshotPath)
                    snap.Name = "cephfs-pvc-snapshot"
                    snap.Namespace = f.UniqueName
@@ -241,14 +241,14 @@ var _ = Describe("CephFS Upgrade Testing", func() {
                }
                deployCephfsPlugin()

-               err = waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
+               err = waitForDeploymentComplete(cephFSDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
                if err != nil {
-                   e2elog.Failf("timeout waiting for upgraded deployment %s with error %v", cephfsDeploymentName, err)
+                   e2elog.Failf("timeout waiting for upgraded deployment %s with error %v", cephFSDeploymentName, err)
                }

-               err = waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
+               err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
                if err != nil {
-                   e2elog.Failf("timeout waiting for upgraded daemonset %s with error %v", cephfsDeamonSetName, err)
+                   e2elog.Failf("timeout waiting for upgraded daemonset %s with error %v", cephFSDeamonSetName, err)
                }

                app.Labels = label
@@ -261,8 +261,8 @@ var _ = Describe("CephFS Upgrade Testing", func() {
            })

            By("Create clone from a snapshot", func() {
-               pvcClonePath := cephfsExamplePath + "pvc-restore.yaml"
-               appClonePath := cephfsExamplePath + "pod-restore.yaml"
+               pvcClonePath := cephFSExamplePath + "pvc-restore.yaml"
+               appClonePath := cephFSExamplePath + "pod-restore.yaml"
                label := make(map[string]string)

                // pvc clone is only supported from v1.16+
@@ -310,7 +310,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
                }

                // Delete the snapshot of the parent pvc.
-               snapshotPath := cephfsExamplePath + "snapshot.yaml"
+               snapshotPath := cephFSExamplePath + "snapshot.yaml"
                snap := getSnapshot(snapshotPath)
                snap.Name = "cephfs-pvc-snapshot"
                snap.Namespace = f.UniqueName
@@ -324,8 +324,8 @@ var _ = Describe("CephFS Upgrade Testing", func() {
            })

            By("Create clone from existing PVC", func() {
-               pvcSmartClonePath := cephfsExamplePath + "pvc-clone.yaml"
-               appSmartClonePath := cephfsExamplePath + "pod-clone.yaml"
+               pvcSmartClonePath := cephFSExamplePath + "pvc-clone.yaml"
+               appSmartClonePath := cephFSExamplePath + "pod-clone.yaml"
                label := make(map[string]string)

                // pvc clone is only supported from v1.16+

e2e/utils.go
@@ -422,7 +422,7 @@ func checkDataPersist(pvcPath, appPath string, f *framework.Framework) error {
    return err
 }

-func pvcDeleteWhenPoolNotFound(pvcPath string, cephfs bool, f *framework.Framework) error {
+func pvcDeleteWhenPoolNotFound(pvcPath string, cephFS bool, f *framework.Framework) error {
    pvc, err := loadPVC(pvcPath)
    if err != nil {
        return err
@@ -433,13 +433,13 @@ func pvcDeleteWhenPoolNotFound(pvcPath string, cephfs bool, f *framework.Framewo
    if err != nil {
        return err
    }
-   if cephfs {
+   if cephFS {
        err = deleteBackingCephFSVolume(f, pvc)
        if err != nil {
            return err
        }
-       // delete cephfs filesystem
-       err = deletePool("myfs", cephfs, f)
+       // delete cephFS filesystem
+       err = deletePool("myfs", cephFS, f)
        if err != nil {
            return err
        }
@@ -449,7 +449,7 @@ func pvcDeleteWhenPoolNotFound(pvcPath string, cephfs bool, f *framework.Framewo
        return err
    }
    // delete rbd pool
-   err = deletePool(defaultRBDPool, cephfs, f)
+   err = deletePool(defaultRBDPool, cephFS, f)
    if err != nil {
        return err
    }