cleanup: rework naming conventions

This commit replaces cephfs -> cephFS to maintain
consistency throughout the codebase

Updates: #1465

Signed-off-by: Yati Padia <ypadia@redhat.com>
Yati Padia authored 2021-09-20 15:46:55 +05:30; committed by mergify[bot]
parent 34a21cdbe3
commit 1cf14cd83c
14 changed files with 114 additions and 113 deletions


@@ -13,7 +13,7 @@ A clear and concise description of what the bug is.
- Image/version of Ceph CSI driver :
- Helm chart version :
- Kernel version :
- Mounter used for mounting PVC (for cephfs its `fuse` or `kernel`. for rbd its
- Mounter used for mounting PVC (for cephFS its `fuse` or `kernel`. for rbd its
`krbd` or `rbd-nbd`) :
- Kubernetes cluster version :
- Ceph cluster version :
@@ -61,7 +61,7 @@ If the issue is in PVC mounting please attach complete logs of below containers.
- if required attach dmesg logs.
**Note:-** If its a rbd issue please provide only rbd related logs, if its a
cephfs issue please provide cephfs logs.
cephFS issue please provide cephFS logs.
# Additional context #


@@ -35,7 +35,7 @@ Independent CSI plugins are provided to support RBD and CephFS backed volumes,
- For details about configuration and deployment of RBD plugin, please refer
[rbd doc](https://github.com/ceph/ceph-csi/blob/devel/docs/deploy-rbd.md) and
for CephFS plugin configuration and deployment please
refer [cephfs doc](https://github.com/ceph/ceph-csi/blob/devel/docs/deploy-cephfs.md).
refer [cephFS doc](https://github.com/ceph/ceph-csi/blob/devel/docs/deploy-cephfs.md).
- For example usage of RBD and CephFS CSI plugins, see examples in `examples/`.
- Stale resource cleanup, please refer [cleanup doc](docs/resource-cleanup.md).


@@ -1,6 +1,6 @@
# ceph-csi-cephfs
The ceph-csi-cephfs chart adds cephfs volume support to your cluster.
The ceph-csi-cephfs chart adds cephFS volume support to your cluster.
## Install from release repo
@@ -134,12 +134,12 @@ charts and their default values.
| `configMapName` | Name of the configmap which contains cluster configuration | `ceph-csi-config` |
| `externallyManagedConfigmap` | Specifies the use of an externally provided configmap | `false` |
| `storageClass.create` | Specifies whether the StorageClass should be created | `false` |
| `storageClass.name` | Specifies the cephfs StorageClass name | `csi-cephfs-sc` |
| `storageClass.name` | Specifies the cephFS StorageClass name | `csi-cephfs-sc` |
| `storageClass.clusterID` | String representing a Ceph cluster to provision storage from | `<cluster-ID>` |
| `storageClass.fsName` | CephFS filesystem name into which the volume shall be created | `myfs` |
| `storageClass.pool` | Ceph pool into which volume data shall be stored | `""` |
| `storageClass.fuseMountOptions` | Comma separated string of Ceph-fuse mount options | `""` |
| `storageclass.kernelMountOptions` | Comma separated string of Cephfs kernel mount options | `""` |
| `storageclass.kernelMountOptions` | Comma separated string of CephFS kernel mount options | `""` |
| `storageClass.mounter` | The driver can use either ceph-fuse (fuse) or ceph kernelclient (kernel) | `""` |
| `storageClass.volumeNamePrefix` | Prefix to use for naming subvolumes | `""` |
| `storageClass.provisionerSecret` | The secrets have to contain user and/or Ceph admin credentials. | `csi-cephfs-secret` |
@@ -152,8 +152,8 @@ charts and their default values.
| `storageClass.allowVolumeExpansion` | Specifies whether volume expansion should be allowed | `true` |
| `storageClass.mountOptions` | Specifies the mount options | `[]` |
| `secret.create` | Specifies whether the secret should be created | `false` |
| `secret.name` | Specifies the cephfs secret name | `csi-cephfs-secret` |
| `secret.adminID` | Specifies the admin ID of the cephfs secret | `<plaintext ID>` |
| `secret.name` | Specifies the cephFS secret name | `csi-cephfs-secret` |
| `secret.adminID` | Specifies the admin ID of the cephFS secret | `<plaintext ID>` |
| `secret.adminKey` | Specifies the key that corresponds to the adminID | `<Ceph auth key corresponding to ID above>` |
### Command Line


@@ -36,12 +36,12 @@ import (
const (
rbdType = "rbd"
cephfsType = "cephfs"
cephFSType = "cephfs"
livenessType = "liveness"
controllerType = "controller"
rbdDefaultName = "rbd.csi.ceph.com"
cephfsDefaultName = "cephfs.csi.ceph.com"
cephFSDefaultName = "cephfs.csi.ceph.com"
livenessDefaultName = "liveness.csi.ceph.com"
pollTime = 60 // seconds
@@ -144,8 +144,8 @@ func getDriverName() string {
switch conf.Vtype {
case rbdType:
return rbdDefaultName
case cephfsType:
return cephfsDefaultName
case cephFSType:
return cephFSDefaultName
case livenessType:
return livenessDefaultName
default:
@@ -222,7 +222,7 @@ func main() {
driver := rbd.NewDriver()
driver.Run(&conf)
case cephfsType:
case cephFSType:
driver := cephfs.NewDriver()
driver.Run(&conf)


@@ -3,7 +3,7 @@
If the PVC is created with storage class which is having the `reclaimPolicy`
as `Retain` will not delete the PV object, backend omap metadata and backend image.
Manual deletion of PV will result in stale omap keys, values,
cephfs subvolume and rbd image.
cephFS subvolume and rbd image.
It is required to cleanup metadata and image separately.
## Steps
@@ -67,7 +67,7 @@ a. remove rbd image(csi-vol-omapval, the prefix csi-vol is value of [volumeNameP
Removing image: 100% complete...done.
```
b. remove cephfs subvolume(csi-vol-omapval)
b. remove cephFS subvolume(csi-vol-omapval)
```
ceph fs subvolume rm volume_name subvolume_name group_name

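If this cleanup step needs automating, the same CLI call can be wrapped from Go. A hedged sketch, assuming the `ceph` binary is on PATH with cluster access; the volume, subvolume, and group names are placeholders:

```
package main

import (
	"fmt"
	"os/exec"
)

// removeSubvolume shells out to the command shown in the doc above:
//   ceph fs subvolume rm <volume> <subvolume> <group>
func removeSubvolume(volume, subvolume, group string) error {
	out, err := exec.Command("ceph", "fs", "subvolume", "rm", volume, subvolume, group).CombinedOutput()
	if err != nil {
		return fmt.Errorf("ceph fs subvolume rm failed: %w: %s", err, out)
	}
	return nil
}

func main() {
	// Placeholder names; csi-vol-omapval follows the doc's example.
	if err := removeSubvolume("myfs", "csi-vol-omapval", "csi"); err != nil {
		fmt.Println(err)
	}
}
```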

@@ -3,6 +3,7 @@
- [End-to-End Testing](#end-to-end-testing)
- [Introduction](#introduction)
- [Install Kubernetes](#install-kubernetes)
- [Deploy Rook](#deploy-rook)
- [Test parameters](#test-parameters)
- [E2E for snapshot](#e2e-for-snapshot)
- [Running E2E](#running-e2e)
@@ -89,9 +90,9 @@ are available while running tests:
| flag | description |
| ----------------- | ----------------------------------------------------------------------------- |
| deploy-timeout | Timeout to wait for created kubernetes resources (default: 10 minutes) |
| deploy-cephfs | Deploy cephfs csi driver as part of E2E (default: true) |
| deploy-cephfs | Deploy cephFS csi driver as part of E2E (default: true) |
| deploy-rbd | Deploy rbd csi driver as part of E2E (default: true) |
| test-cephfs | Test cephfs csi driver as part of E2E (default: true) |
| test-cephfs | Test cephFS csi driver as part of E2E (default: true) |
| upgrade-testing | Perform upgrade testing (default: false) |
| upgrade-version | Target version for upgrade testing (default: "v3.3.1") |
| test-rbd | Test rbd csi driver as part of E2E (default: true) |
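
These options are ordinary Go test flags; their definitions appear in the `e2e_test.go` hunk later in this commit. A minimal sketch of the underlying pattern, with hypothetical test names, showing how such a boolean flag gates a suite:

```
package e2e

import (
	"flag"
	"testing"
)

// Hypothetical mirror of the E2E wiring: a flag registered at package
// init, consulted before running a group of tests.
var testCephFS = flag.Bool("test-cephfs", true, "test cephFS csi driver")

func TestCephFSSuite(t *testing.T) {
	if !*testCephFS {
		t.Skip("skipping: -test-cephfs=false")
	}
	// CephFS test cases would run here.
}
```

Running `go test ./e2e -test-cephfs=false` would then skip the suite (the exact invocation depends on how the project's Makefile wires these flags).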


@@ -18,17 +18,17 @@ import (
)
var (
cephfsProvisioner = "csi-cephfsplugin-provisioner.yaml"
cephfsProvisionerRBAC = "csi-provisioner-rbac.yaml"
cephfsProvisionerPSP = "csi-provisioner-psp.yaml"
cephfsNodePlugin = "csi-cephfsplugin.yaml"
cephfsNodePluginRBAC = "csi-nodeplugin-rbac.yaml"
cephfsNodePluginPSP = "csi-nodeplugin-psp.yaml"
cephfsDeploymentName = "csi-cephfsplugin-provisioner"
cephfsDeamonSetName = "csi-cephfsplugin"
cephfsContainerName = "csi-cephfsplugin"
cephfsDirPath = "../deploy/cephfs/kubernetes/"
cephfsExamplePath = examplePath + "cephfs/"
cephFSProvisioner = "csi-cephfsplugin-provisioner.yaml"
cephFSProvisionerRBAC = "csi-provisioner-rbac.yaml"
cephFSProvisionerPSP = "csi-provisioner-psp.yaml"
cephFSNodePlugin = "csi-cephfsplugin.yaml"
cephFSNodePluginRBAC = "csi-nodeplugin-rbac.yaml"
cephFSNodePluginPSP = "csi-nodeplugin-psp.yaml"
cephFSDeploymentName = "csi-cephfsplugin-provisioner"
cephFSDeamonSetName = "csi-cephfsplugin"
cephFSContainerName = "csi-cephfsplugin"
cephFSDirPath = "../deploy/cephfs/kubernetes/"
cephFSExamplePath = examplePath + "cephfs/"
subvolumegroup = "e2e"
fileSystemName = "myfs"
)
@@ -36,23 +36,23 @@ var (
func deployCephfsPlugin() {
// delete objects deployed by rook
data, err := replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerRBAC)
data, err := replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisionerRBAC)
if err != nil {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisionerRBAC, err)
}
_, err = framework.RunKubectlInput(cephCSINamespace, data, "--ignore-not-found=true", ns, "delete", "-f", "-")
if err != nil {
e2elog.Failf("failed to delete provisioner rbac %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, err)
e2elog.Failf("failed to delete provisioner rbac %s with error %v", cephFSDirPath+cephFSProvisionerRBAC, err)
}
data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginRBAC)
data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePluginRBAC)
if err != nil {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePluginRBAC, err)
}
_, err = framework.RunKubectlInput(cephCSINamespace, data, "delete", "--ignore-not-found=true", ns, "-f", "-")
if err != nil {
e2elog.Failf("failed to delete nodeplugin rbac %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err)
e2elog.Failf("failed to delete nodeplugin rbac %s with error %v", cephFSDirPath+cephFSNodePluginRBAC, err)
}
createORDeleteCephfsResources(kubectlCreate)
@@ -63,12 +63,12 @@ func deleteCephfsPlugin() {
}
func createORDeleteCephfsResources(action kubectlAction) {
csiDriver, err := ioutil.ReadFile(cephfsDirPath + csiDriverObject)
csiDriver, err := ioutil.ReadFile(cephFSDirPath + csiDriverObject)
if err != nil {
// createORDeleteRbdResources is used for upgrade testing as csidriverObject is
// newly added, discarding file not found error.
if !os.IsNotExist(err) {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+csiDriverObject, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+csiDriverObject, err)
}
} else {
err = retryKubectlInput(cephCSINamespace, action, string(csiDriver), deployTimeout)
@@ -89,55 +89,55 @@ func createORDeleteCephfsResources(action kubectlAction) {
e2elog.Failf("failed to %s ceph-conf configmap object with error %v", action, err)
}
}
data, err := replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisioner)
data, err := replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisioner)
if err != nil {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisioner, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisioner, err)
}
data = oneReplicaDeployYaml(data)
err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
if err != nil {
e2elog.Failf("failed to %s CephFS provisioner with error %v", action, err)
}
data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerRBAC)
data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisionerRBAC)
if err != nil {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisionerRBAC, err)
}
err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
if err != nil {
e2elog.Failf("failed to %s CephFS provisioner rbac with error %v", action, err)
}
data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerPSP)
data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisionerPSP)
if err != nil {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerPSP, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisionerPSP, err)
}
err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
if err != nil {
e2elog.Failf("failed to %s CephFS provisioner psp with error %v", action, err)
}
data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePlugin)
data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePlugin)
if err != nil {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePlugin, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePlugin, err)
}
err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
if err != nil {
e2elog.Failf("failed to %s CephFS nodeplugin with error %v", action, err)
}
data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginRBAC)
data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePluginRBAC)
if err != nil {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePluginRBAC, err)
}
err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
if err != nil {
e2elog.Failf("failed to %s CephFS nodeplugin rbac with error %v", action, err)
}
data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginPSP)
data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePluginPSP)
if err != nil {
e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginPSP, err)
e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePluginPSP, err)
}
err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
if err != nil {
@@ -200,7 +200,7 @@ var _ = Describe("cephfs", func() {
}
deployCephfsPlugin()
}
err := createConfigMap(cephfsDirPath, f.ClientSet, f)
err := createConfigMap(cephFSDirPath, f.ClientSet, f)
if err != nil {
e2elog.Failf("failed to create configmap with error %v", err)
}
@@ -239,7 +239,7 @@ var _ = Describe("cephfs", func() {
// log all details from the namespace where Ceph-CSI is deployed
framework.DumpAllNamespaceInfo(c, cephCSINamespace)
}
err := deleteConfigMap(cephfsDirPath)
err := deleteConfigMap(cephFSDirPath)
if err != nil {
e2elog.Failf("failed to delete configmap with error %v", err)
}
@@ -255,7 +255,7 @@ var _ = Describe("cephfs", func() {
if err != nil {
e2elog.Failf("failed to delete node secret with error %v", err)
}
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass with error %v", err)
}
@@ -272,25 +272,25 @@ var _ = Describe("cephfs", func() {
Context("Test CephFS CSI", func() {
It("Test CephFS CSI", func() {
pvcPath := cephfsExamplePath + "pvc.yaml"
appPath := cephfsExamplePath + "pod.yaml"
pvcClonePath := cephfsExamplePath + "pvc-restore.yaml"
pvcSmartClonePath := cephfsExamplePath + "pvc-clone.yaml"
appClonePath := cephfsExamplePath + "pod-restore.yaml"
appSmartClonePath := cephfsExamplePath + "pod-clone.yaml"
snapshotPath := cephfsExamplePath + "snapshot.yaml"
pvcPath := cephFSExamplePath + "pvc.yaml"
appPath := cephFSExamplePath + "pod.yaml"
pvcClonePath := cephFSExamplePath + "pvc-restore.yaml"
pvcSmartClonePath := cephFSExamplePath + "pvc-clone.yaml"
appClonePath := cephFSExamplePath + "pod-restore.yaml"
appSmartClonePath := cephFSExamplePath + "pod-clone.yaml"
snapshotPath := cephFSExamplePath + "snapshot.yaml"
By("checking provisioner deployment is running", func() {
err := waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
err := waitForDeploymentComplete(cephFSDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("timeout waiting for deployment %s with error %v", cephfsDeploymentName, err)
e2elog.Failf("timeout waiting for deployment %s with error %v", cephFSDeploymentName, err)
}
})
By("checking nodeplugin deamonset pods are running", func() {
err := waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
err := waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("timeout waiting for daemonset %s with error %v", cephfsDeamonSetName, err)
e2elog.Failf("timeout waiting for daemonset %s with error %v", cephFSDeamonSetName, err)
}
})
@@ -302,11 +302,11 @@ var _ = Describe("cephfs", func() {
e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
}
// Deleting the storageclass and secret created by helm
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
}
err = deleteResource(cephfsExamplePath + "secret.yaml")
err = deleteResource(cephFSExamplePath + "secret.yaml")
if err != nil {
e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
}
@@ -314,7 +314,7 @@ var _ = Describe("cephfs", func() {
}
By("check static PVC", func() {
scPath := cephfsExamplePath + "secret.yaml"
scPath := cephFSExamplePath + "secret.yaml"
err := validateCephFsStaticPV(f, appPath, scPath)
if err != nil {
e2elog.Failf("failed to validate CephFS static pv with error %v", err)
@@ -330,7 +330,7 @@ var _ = Describe("cephfs", func() {
if err != nil {
e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
}
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
}
@@ -378,7 +378,7 @@ var _ = Describe("cephfs", func() {
e2elog.Failf("failed to delete PVC with error %v", err)
}
validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass with error %v", err)
}
@@ -399,7 +399,7 @@ var _ = Describe("cephfs", func() {
if err != nil {
e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
}
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
}
@@ -418,7 +418,7 @@ var _ = Describe("cephfs", func() {
if err != nil {
e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
}
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
}
@@ -529,7 +529,7 @@ var _ = Describe("cephfs", func() {
})
By("validate multiple subvolumegroup creation", func() {
err := deleteResource(cephfsExamplePath + "storageclass.yaml")
err := deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass with error %v", err)
}
@@ -538,7 +538,7 @@ var _ = Describe("cephfs", func() {
"clusterID-1": "subvolgrp1",
"clusterID-2": "subvolgrp2",
}
err = createCustomConfigMap(f.ClientSet, cephfsDirPath, subvolgrpInfo)
err = createCustomConfigMap(f.ClientSet, cephFSDirPath, subvolgrpInfo)
if err != nil {
e2elog.Failf("failed to create configmap with error %v", err)
}
@@ -553,7 +553,7 @@ var _ = Describe("cephfs", func() {
if err != nil {
e2elog.Failf("failed to validate pvc and application with error %v", err)
}
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass with error %v", err)
}
@@ -574,7 +574,7 @@ var _ = Describe("cephfs", func() {
if err != nil {
e2elog.Failf("failed to validate pvc and application with error %v", err)
}
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass with error %v", err)
}
@@ -582,11 +582,11 @@ var _ = Describe("cephfs", func() {
if err != nil {
e2elog.Failf("failed to validate subvolume group with error %v", err)
}
err = deleteConfigMap(cephfsDirPath)
err = deleteConfigMap(cephFSDirPath)
if err != nil {
e2elog.Failf("failed to delete configmap with error %v", err)
}
err = createConfigMap(cephfsDirPath, f.ClientSet, f)
err = createConfigMap(cephFSDirPath, f.ClientSet, f)
if err != nil {
e2elog.Failf("failed to create configmap with error %v", err)
}
@@ -703,7 +703,7 @@ var _ = Describe("cephfs", func() {
e2elog.Failf("failed to delete PVC with error %v", err)
}
err = deleteResource(cephfsExamplePath + "snapshotclass.yaml")
err = deleteResource(cephFSExamplePath + "snapshotclass.yaml")
if err != nil {
e2elog.Failf("failed to delete CephFS snapshotclass with error %v", err)
}
@@ -782,7 +782,7 @@ var _ = Describe("cephfs", func() {
e2elog.Failf("failed to delete snapshot (%s): %v", f.UniqueName, err)
}
err = deleteResource(cephfsExamplePath + "snapshotclass.yaml")
err = deleteResource(cephFSExamplePath + "snapshotclass.yaml")
if err != nil {
e2elog.Failf("failed to delete CephFS snapshotclass with error %v", err)
}


@@ -43,7 +43,7 @@ func createCephfsStorageClass(
f *framework.Framework,
enablePool bool,
params map[string]string) error {
scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "storageclass.yaml")
scPath := fmt.Sprintf("%s/%s", cephFSExamplePath, "storageclass.yaml")
sc, err := getStorageClass(scPath)
if err != nil {
return err
@@ -90,7 +90,7 @@ func createCephfsStorageClass(
}
func createCephfsSecret(f *framework.Framework, secretName, userName, userKey string) error {
scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "secret.yaml")
scPath := fmt.Sprintf("%s/%s", cephFSExamplePath, "secret.yaml")
sc, err := getSecret(scPath)
if err != nil {
return err
@@ -131,9 +131,9 @@ func unmountCephFSVolume(f *framework.Framework, appName, pvcName string) error
_, stdErr, err := execCommandInDaemonsetPod(
f,
cmd,
cephfsDeamonSetName,
cephFSDeamonSetName,
pod.Spec.NodeName,
cephfsContainerName,
cephFSContainerName,
cephCSINamespace)
if stdErr != "" {
e2elog.Logf("StdErr occurred: %s", stdErr)


@@ -18,9 +18,9 @@ func init() {
log.SetOutput(GinkgoWriter)
flag.IntVar(&deployTimeout, "deploy-timeout", 10, "timeout to wait for created kubernetes resources")
flag.BoolVar(&deployCephFS, "deploy-cephfs", true, "deploy cephfs csi driver")
flag.BoolVar(&deployCephFS, "deploy-cephfs", true, "deploy cephFS csi driver")
flag.BoolVar(&deployRBD, "deploy-rbd", true, "deploy rbd csi driver")
flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephfs csi driver")
flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephFS csi driver")
flag.BoolVar(&testRBD, "test-rbd", true, "test rbd csi driver")
flag.BoolVar(&helmTest, "helm-test", false, "tests running on deployment via helm")
flag.BoolVar(&upgradeTesting, "upgrade-testing", false, "perform upgrade testing")


@@ -649,9 +649,9 @@ func sparsifyBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeCla
return err
}
func deletePool(name string, cephfs bool, f *framework.Framework) error {
func deletePool(name string, cephFS bool, f *framework.Framework) error {
cmds := []string{}
if cephfs {
if cephFS {
// ceph fs fail
// ceph fs rm myfs --yes-i-really-mean-it
// ceph osd pool delete myfs-metadata myfs-metadata

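The comments in `deletePool` spell out why the `cephFS` branch exists: removing a CephFS filesystem takes a command sequence, while an RBD pool needs a single delete. A hedged sketch of that branching; the pool suffixes and the final `--yes-i-really-really-mean-it` flag are assumptions about the elided lines:

```
package main

import "fmt"

// deletePoolCommands returns the CLI sequence sketched in the comments
// above; it is illustrative, not the e2e helper itself.
func deletePoolCommands(name string, cephFS bool) [][]string {
	if cephFS {
		// Fail and remove the filesystem, then delete its backing pools.
		return [][]string{
			{"ceph", "fs", "fail", name},
			{"ceph", "fs", "rm", name, "--yes-i-really-mean-it"},
			{"ceph", "osd", "pool", "delete", name + "-metadata", name + "-metadata", "--yes-i-really-really-mean-it"},
		}
	}
	// Plain RBD pool: one delete suffices.
	return [][]string{
		{"ceph", "osd", "pool", "delete", name, name, "--yes-i-really-really-mean-it"},
	}
}

func main() {
	for _, cmd := range deletePoolCommands("myfs", true) {
		fmt.Println(cmd)
	}
}
```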

@@ -171,7 +171,7 @@ func deleteRBDSnapshotClass() error {
}
func createCephFSSnapshotClass(f *framework.Framework) error {
scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "snapshotclass.yaml")
scPath := fmt.Sprintf("%s/%s", cephFSExamplePath, "snapshotclass.yaml")
sc := getSnapshotClass(scPath)
sc.Parameters["csi.storage.k8s.io/snapshotter-secret-namespace"] = cephCSINamespace
sc.Parameters["csi.storage.k8s.io/snapshotter-secret-name"] = cephFSProvisionerSecretName


@@ -37,7 +37,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
appKey = "app"
appLabel = "cephfs-upgrade-testing"
)
// deploy cephfs CSI
// deploy cephFS CSI
BeforeEach(func() {
if !upgradeTesting || !testCephFS {
Skip("Skipping CephFS Upgrade Test")
@@ -60,7 +60,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
if err != nil {
e2elog.Failf("failed to upgrade csi with error %v", err)
}
err = createConfigMap(cephfsDirPath, f.ClientSet, f)
err = createConfigMap(cephFSDirPath, f.ClientSet, f)
if err != nil {
e2elog.Failf("failed to create configmap with error %v", err)
}
@@ -108,7 +108,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
// log all details from the namespace where Ceph-CSI is deployed
framework.DumpAllNamespaceInfo(c, cephCSINamespace)
}
err = deleteConfigMap(cephfsDirPath)
err = deleteConfigMap(cephFSDirPath)
if err != nil {
e2elog.Failf("failed to delete configmap with error %v", err)
}
@@ -124,11 +124,11 @@ var _ = Describe("CephFS Upgrade Testing", func() {
if err != nil {
e2elog.Failf("failed to delete node secret with error %v", err)
}
err = deleteResource(cephfsExamplePath + "storageclass.yaml")
err = deleteResource(cephFSExamplePath + "storageclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass with error %v", err)
}
err = deleteResource(cephfsExamplePath + "snapshotclass.yaml")
err = deleteResource(cephFSExamplePath + "snapshotclass.yaml")
if err != nil {
e2elog.Failf("failed to delete storageclass with error %v", err)
}
@@ -148,22 +148,22 @@ var _ = Describe("CephFS Upgrade Testing", func() {
Context("Cephfs Upgrade Test", func() {
It("Cephfs Upgrade Test", func() {
By("checking provisioner deployment is running", func() {
err = waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
err = waitForDeploymentComplete(cephFSDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("timeout waiting for deployment %s with error %v", cephfsDeploymentName, err)
e2elog.Failf("timeout waiting for deployment %s with error %v", cephFSDeploymentName, err)
}
})
By("checking nodeplugin deamonset pods are running", func() {
err = waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("timeout waiting for daemonset %s with error%v", cephfsDeamonSetName, err)
e2elog.Failf("timeout waiting for daemonset %s with error%v", cephFSDeamonSetName, err)
}
})
By("upgrade to latest changes and verify app re-mount", func() {
// TODO: fetch pvc size from spec.
pvcPath := cephfsExamplePath + "pvc.yaml"
appPath := cephfsExamplePath + "pod.yaml"
pvcPath := cephFSExamplePath + "pvc.yaml"
appPath := cephFSExamplePath + "pod.yaml"
data := "check data persists"
label := make(map[string]string)
@@ -218,7 +218,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
// pvc clone is only supported from v1.16+
if k8sVersionGreaterEquals(f.ClientSet, 1, 17) {
// Create snapshot of the pvc
snapshotPath := cephfsExamplePath + "snapshot.yaml"
snapshotPath := cephFSExamplePath + "snapshot.yaml"
snap := getSnapshot(snapshotPath)
snap.Name = "cephfs-pvc-snapshot"
snap.Namespace = f.UniqueName
@@ -241,14 +241,14 @@ var _ = Describe("CephFS Upgrade Testing", func() {
}
deployCephfsPlugin()
err = waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
err = waitForDeploymentComplete(cephFSDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("timeout waiting for upgraded deployment %s with error %v", cephfsDeploymentName, err)
e2elog.Failf("timeout waiting for upgraded deployment %s with error %v", cephFSDeploymentName, err)
}
err = waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
if err != nil {
e2elog.Failf("timeout waiting for upgraded daemonset %s with error %v", cephfsDeamonSetName, err)
e2elog.Failf("timeout waiting for upgraded daemonset %s with error %v", cephFSDeamonSetName, err)
}
app.Labels = label
@@ -261,8 +261,8 @@ var _ = Describe("CephFS Upgrade Testing", func() {
})
By("Create clone from a snapshot", func() {
pvcClonePath := cephfsExamplePath + "pvc-restore.yaml"
appClonePath := cephfsExamplePath + "pod-restore.yaml"
pvcClonePath := cephFSExamplePath + "pvc-restore.yaml"
appClonePath := cephFSExamplePath + "pod-restore.yaml"
label := make(map[string]string)
// pvc clone is only supported from v1.16+
@@ -310,7 +310,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
}
// Delete the snapshot of the parent pvc.
snapshotPath := cephfsExamplePath + "snapshot.yaml"
snapshotPath := cephFSExamplePath + "snapshot.yaml"
snap := getSnapshot(snapshotPath)
snap.Name = "cephfs-pvc-snapshot"
snap.Namespace = f.UniqueName
@@ -324,8 +324,8 @@ var _ = Describe("CephFS Upgrade Testing", func() {
})
By("Create clone from existing PVC", func() {
pvcSmartClonePath := cephfsExamplePath + "pvc-clone.yaml"
appSmartClonePath := cephfsExamplePath + "pod-clone.yaml"
pvcSmartClonePath := cephFSExamplePath + "pvc-clone.yaml"
appSmartClonePath := cephFSExamplePath + "pod-clone.yaml"
label := make(map[string]string)
// pvc clone is only supported from v1.16+


@@ -422,7 +422,7 @@ func checkDataPersist(pvcPath, appPath string, f *framework.Framework) error {
return err
}
func pvcDeleteWhenPoolNotFound(pvcPath string, cephfs bool, f *framework.Framework) error {
func pvcDeleteWhenPoolNotFound(pvcPath string, cephFS bool, f *framework.Framework) error {
pvc, err := loadPVC(pvcPath)
if err != nil {
return err
@@ -433,13 +433,13 @@ func pvcDeleteWhenPoolNotFound(pvcPath string, cephfs bool, f *framework.Framewo
if err != nil {
return err
}
if cephfs {
if cephFS {
err = deleteBackingCephFSVolume(f, pvc)
if err != nil {
return err
}
// delete cephfs filesystem
err = deletePool("myfs", cephfs, f)
// delete cephFS filesystem
err = deletePool("myfs", cephFS, f)
if err != nil {
return err
}
@@ -449,7 +449,7 @@ func pvcDeleteWhenPoolNotFound(pvcPath string, cephfs bool, f *framework.Framewo
return err
}
// delete rbd pool
err = deletePool(defaultRBDPool, cephfs, f)
err = deletePool(defaultRBDPool, cephFS, f)
if err != nil {
return err
}


@@ -44,7 +44,7 @@ data:
"<MONValueN>"
],
"cephFS": {
"subvolumeGroup": "<subvolumegroup for cephfs volumes>"
"subvolumeGroup": "<subvolumegroup for cephFS volumes>"
}
}
]
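
Note the JSON key in this sample was already spelled `cephFS`; only the placeholder comment changes. A hedged Go sketch, with illustrative struct names rather than the real ceph-csi types, of how such an entry decodes:

```
package main

import (
	"encoding/json"
	"fmt"
)

// clusterInfo is illustrative only; the real types live in ceph-csi's
// internal/util package. The json tag must stay "cephFS" to match the
// configmap key above.
type clusterInfo struct {
	Monitors []string `json:"monitors"`
	CephFS   struct {
		SubvolumeGroup string `json:"subvolumeGroup"`
	} `json:"cephFS"`
}

func main() {
	raw := `[{"monitors":["10.0.0.1:6789"],"cephFS":{"subvolumeGroup":"csi"}}]`
	var clusters []clusterInfo
	if err := json.Unmarshal([]byte(raw), &clusters); err != nil {
		fmt.Println("decode error:", err)
		return
	}
	fmt.Println(clusters[0].CephFS.SubvolumeGroup) // prints: csi
}
```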