mirror of https://github.com/ceph/ceph-csi.git
cleanup: rework on naming conventions
This commit replaces cephfs -> cephFS to maintain consistency
throughout the codebase.

Updates: #1465

Signed-off-by: Yati Padia <ypadia@redhat.com>
parent 34a21cdbe3
commit 1cf14cd83c
4 .github/ISSUE_TEMPLATE/bug_report.md vendored
@@ -13,7 +13,7 @@ A clear and concise description of what the bug is.
 - Image/version of Ceph CSI driver :
 - Helm chart version :
 - Kernel version :
-- Mounter used for mounting PVC (for cephfs its `fuse` or `kernel`. for rbd its
+- Mounter used for mounting PVC (for cephFS its `fuse` or `kernel`. for rbd its
   `krbd` or `rbd-nbd`) :
 - Kubernetes cluster version :
 - Ceph cluster version :
@@ -61,7 +61,7 @@ If the issue is in PVC mounting please attach complete logs of below containers.
 - if required attach dmesg logs.

 **Note:-** If its a rbd issue please provide only rbd related logs, if its a
-cephfs issue please provide cephfs logs.
+cephFS issue please provide cephFS logs.

 # Additional context #

@@ -35,7 +35,7 @@ Independent CSI plugins are provided to support RBD and CephFS backed volumes,
 - For details about configuration and deployment of RBD plugin, please refer
   [rbd doc](https://github.com/ceph/ceph-csi/blob/devel/docs/deploy-rbd.md) and
   for CephFS plugin configuration and deployment please
-  refer [cephfs doc](https://github.com/ceph/ceph-csi/blob/devel/docs/deploy-cephfs.md).
+  refer [cephFS doc](https://github.com/ceph/ceph-csi/blob/devel/docs/deploy-cephfs.md).
 - For example usage of RBD and CephFS CSI plugins, see examples in `examples/`.
 - Stale resource cleanup, please refer [cleanup doc](docs/resource-cleanup.md).

@@ -1,6 +1,6 @@
 # ceph-csi-cephfs

-The ceph-csi-cephfs chart adds cephfs volume support to your cluster.
+The ceph-csi-cephfs chart adds cephFS volume support to your cluster.

 ## Install from release repo

@@ -134,12 +134,12 @@ charts and their default values.
 | `configMapName` | Name of the configmap which contains cluster configuration | `ceph-csi-config` |
 | `externallyManagedConfigmap` | Specifies the use of an externally provided configmap | `false` |
 | `storageClass.create` | Specifies whether the StorageClass should be created | `false` |
-| `storageClass.name` | Specifies the cephfs StorageClass name | `csi-cephfs-sc` |
+| `storageClass.name` | Specifies the cephFS StorageClass name | `csi-cephfs-sc` |
 | `storageClass.clusterID` | String representing a Ceph cluster to provision storage from | `<cluster-ID>` |
 | `storageClass.fsName` | CephFS filesystem name into which the volume shall be created | `myfs` |
 | `storageClass.pool` | Ceph pool into which volume data shall be stored | `""` |
 | `storageClass.fuseMountOptions` | Comma separated string of Ceph-fuse mount options | `""` |
-| `storageclass.kernelMountOptions` | Comma separated string of Cephfs kernel mount options | `""` |
+| `storageclass.kernelMountOptions` | Comma separated string of CephFS kernel mount options | `""` |
 | `storageClass.mounter` | The driver can use either ceph-fuse (fuse) or ceph kernelclient (kernel) | `""` |
 | `storageClass.volumeNamePrefix` | Prefix to use for naming subvolumes | `""` |
 | `storageClass.provisionerSecret` | The secrets have to contain user and/or Ceph admin credentials. | `csi-cephfs-secret` |
@@ -152,8 +152,8 @@ charts and their default values.
 | `storageClass.allowVolumeExpansion` | Specifies whether volume expansion should be allowed | `true` |
 | `storageClass.mountOptions` | Specifies the mount options | `[]` |
 | `secret.create` | Specifies whether the secret should be created | `false` |
-| `secret.name` | Specifies the cephfs secret name | `csi-cephfs-secret` |
-| `secret.adminID` | Specifies the admin ID of the cephfs secret | `<plaintext ID>` |
+| `secret.name` | Specifies the cephFS secret name | `csi-cephfs-secret` |
+| `secret.adminID` | Specifies the admin ID of the cephFS secret | `<plaintext ID>` |
 | `secret.adminKey` | Specifies the key that corresponds to the adminID | `<Ceph auth key corresponding to ID above>` |

 ### Command Line
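For orientation, a hedged sketch of installing this chart with a few of the values from the tables above overridden on the command line; the chart repository URL, release name and namespace here are assumptions, not part of this commit:

```
helm repo add ceph-csi https://ceph.github.io/csi-charts
helm install ceph-csi-cephfs ceph-csi/ceph-csi-cephfs \
  --namespace ceph-csi-cephfs --create-namespace \
  --set storageClass.create=true \
  --set storageClass.fsName=myfs \
  --set secret.create=true
```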
@@ -36,12 +36,12 @@ import (

 const (
     rbdType        = "rbd"
-    cephfsType     = "cephfs"
+    cephFSType     = "cephfs"
     livenessType   = "liveness"
     controllerType = "controller"

     rbdDefaultName      = "rbd.csi.ceph.com"
-    cephfsDefaultName   = "cephfs.csi.ceph.com"
+    cephFSDefaultName   = "cephfs.csi.ceph.com"
     livenessDefaultName = "liveness.csi.ceph.com"

     pollTime = 60 // seconds
@@ -144,8 +144,8 @@ func getDriverName() string {
     switch conf.Vtype {
     case rbdType:
         return rbdDefaultName
-    case cephfsType:
-        return cephfsDefaultName
+    case cephFSType:
+        return cephFSDefaultName
     case livenessType:
         return livenessDefaultName
     default:
@@ -222,7 +222,7 @@ func main() {
         driver := rbd.NewDriver()
         driver.Run(&conf)

-    case cephfsType:
+    case cephFSType:
         driver := cephfs.NewDriver()
         driver.Run(&conf)

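The renamed constants are what the binary's `--type` flag dispatches on in `getDriverName()` and `main()` above. As a hedged illustration (flag set per the ceph-csi deployment manifests; the node ID and socket path are placeholders), the same binary starts either driver:

```
# --type=cephfs selects the cephFS driver; --type=rbd would select RBD instead
cephcsi --type=cephfs \
  --drivername=cephfs.csi.ceph.com \
  --nodeid="$NODE_ID" \
  --endpoint=unix:///csi/csi.sock
```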
@@ -3,7 +3,7 @@
 If the PVC is created with storage class which is having the `reclaimPolicy`
 as `Retain` will not delete the PV object, backend omap metadata and backend image.
 Manual deletion of PV will result in stale omap keys, values,
-cephfs subvolume and rbd image.
+cephFS subvolume and rbd image.
 It is required to cleanup metadata and image separately.

 ## Steps
@@ -67,7 +67,7 @@ a. remove rbd image(csi-vol-omapval, the prefix csi-vol is value of [volumeNameP
 Removing image: 100% complete...done.
 ```

-b. remove cephfs subvolume(csi-vol-omapval)
+b. remove cephFS subvolume(csi-vol-omapval)

 ```
 ceph fs subvolume rm volume_name subvolume_name group_name
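As a hedged, concrete rendering of the command above: the filesystem name, subvolume name and group below are illustrative only (ceph-csi commonly uses `csi` as the subvolume group):

```
ceph fs subvolume rm myfs csi-vol-427774b4-340b-11e9-98d3-af52de380e07 csi
```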
@@ -3,6 +3,7 @@
 - [End-to-End Testing](#end-to-end-testing)
   - [Introduction](#introduction)
   - [Install Kubernetes](#install-kubernetes)
+  - [Deploy Rook](#deploy-rook)
   - [Test parameters](#test-parameters)
   - [E2E for snapshot](#e2e-for-snapshot)
   - [Running E2E](#running-e2e)
@@ -89,9 +90,9 @@ are available while running tests:
 | flag              | description                                                                     |
 | ----------------- | ------------------------------------------------------------------------------- |
 | deploy-timeout    | Timeout to wait for created kubernetes resources (default: 10 minutes)          |
-| deploy-cephfs     | Deploy cephfs csi driver as part of E2E (default: true)                         |
+| deploy-cephfs     | Deploy cephFS csi driver as part of E2E (default: true)                         |
 | deploy-rbd        | Deploy rbd csi driver as part of E2E (default: true)                            |
-| test-cephfs       | Test cephfs csi driver as part of E2E (default: true)                           |
+| test-cephfs       | Test cephFS csi driver as part of E2E (default: true)                           |
 | upgrade-testing   | Perform upgrade testing (default: false)                                        |
 | upgrade-version   | Target version for upgrade testing (default: "v3.3.1")                          |
 | test-rbd          | Test rbd csi driver as part of E2E (default: true)                              |
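A hedged sketch of passing the flags in this table to the suite; the exact `go test` wrapper and the timeout value are assumptions:

```
go test ./e2e/ -test.timeout=60m \
  -deploy-cephfs=true -test-cephfs=true \
  -upgrade-testing=false -v
```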
120 e2e/cephfs.go
@@ -18,17 +18,17 @@ import (
 )

 var (
-    cephfsProvisioner     = "csi-cephfsplugin-provisioner.yaml"
-    cephfsProvisionerRBAC = "csi-provisioner-rbac.yaml"
-    cephfsProvisionerPSP  = "csi-provisioner-psp.yaml"
-    cephfsNodePlugin      = "csi-cephfsplugin.yaml"
-    cephfsNodePluginRBAC  = "csi-nodeplugin-rbac.yaml"
-    cephfsNodePluginPSP   = "csi-nodeplugin-psp.yaml"
-    cephfsDeploymentName  = "csi-cephfsplugin-provisioner"
-    cephfsDeamonSetName   = "csi-cephfsplugin"
-    cephfsContainerName   = "csi-cephfsplugin"
-    cephfsDirPath         = "../deploy/cephfs/kubernetes/"
-    cephfsExamplePath     = examplePath + "cephfs/"
+    cephFSProvisioner     = "csi-cephfsplugin-provisioner.yaml"
+    cephFSProvisionerRBAC = "csi-provisioner-rbac.yaml"
+    cephFSProvisionerPSP  = "csi-provisioner-psp.yaml"
+    cephFSNodePlugin      = "csi-cephfsplugin.yaml"
+    cephFSNodePluginRBAC  = "csi-nodeplugin-rbac.yaml"
+    cephFSNodePluginPSP   = "csi-nodeplugin-psp.yaml"
+    cephFSDeploymentName  = "csi-cephfsplugin-provisioner"
+    cephFSDeamonSetName   = "csi-cephfsplugin"
+    cephFSContainerName   = "csi-cephfsplugin"
+    cephFSDirPath         = "../deploy/cephfs/kubernetes/"
+    cephFSExamplePath     = examplePath + "cephfs/"
     subvolumegroup        = "e2e"
     fileSystemName        = "myfs"
 )
@@ -36,23 +36,23 @@ var (
 func deployCephfsPlugin() {
     // delete objects deployed by rook

-    data, err := replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerRBAC)
+    data, err := replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisionerRBAC)
     if err != nil {
-        e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, err)
+        e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisionerRBAC, err)
     }
     _, err = framework.RunKubectlInput(cephCSINamespace, data, "--ignore-not-found=true", ns, "delete", "-f", "-")
     if err != nil {
-        e2elog.Failf("failed to delete provisioner rbac %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, err)
+        e2elog.Failf("failed to delete provisioner rbac %s with error %v", cephFSDirPath+cephFSProvisionerRBAC, err)
     }

-    data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginRBAC)
+    data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePluginRBAC)
     if err != nil {
-        e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err)
+        e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePluginRBAC, err)
     }
     _, err = framework.RunKubectlInput(cephCSINamespace, data, "delete", "--ignore-not-found=true", ns, "-f", "-")

     if err != nil {
-        e2elog.Failf("failed to delete nodeplugin rbac %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err)
+        e2elog.Failf("failed to delete nodeplugin rbac %s with error %v", cephFSDirPath+cephFSNodePluginRBAC, err)
     }

     createORDeleteCephfsResources(kubectlCreate)
@@ -63,12 +63,12 @@ func deleteCephfsPlugin() {
 }

 func createORDeleteCephfsResources(action kubectlAction) {
-    csiDriver, err := ioutil.ReadFile(cephfsDirPath + csiDriverObject)
+    csiDriver, err := ioutil.ReadFile(cephFSDirPath + csiDriverObject)
     if err != nil {
         // createORDeleteRbdResources is used for upgrade testing as csidriverObject is
         // newly added, discarding file not found error.
         if !os.IsNotExist(err) {
-            e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+csiDriverObject, err)
+            e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+csiDriverObject, err)
         }
     } else {
         err = retryKubectlInput(cephCSINamespace, action, string(csiDriver), deployTimeout)
@@ -89,55 +89,55 @@ func createORDeleteCephfsResources(action kubectlAction) {
             e2elog.Failf("failed to %s ceph-conf configmap object with error %v", action, err)
         }
     }
-    data, err := replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisioner)
+    data, err := replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisioner)
     if err != nil {
-        e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisioner, err)
+        e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisioner, err)
     }
     data = oneReplicaDeployYaml(data)
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
         e2elog.Failf("failed to %s CephFS provisioner with error %v", action, err)
     }
-    data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerRBAC)
+    data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisionerRBAC)

     if err != nil {
-        e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerRBAC, err)
+        e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisionerRBAC, err)
     }
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
         e2elog.Failf("failed to %s CephFS provisioner rbac with error %v", action, err)
     }

-    data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsProvisionerPSP)
+    data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSProvisionerPSP)
     if err != nil {
-        e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsProvisionerPSP, err)
+        e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSProvisionerPSP, err)
     }
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
         e2elog.Failf("failed to %s CephFS provisioner psp with error %v", action, err)
     }

-    data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePlugin)
+    data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePlugin)
     if err != nil {
-        e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePlugin, err)
+        e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePlugin, err)
     }
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
         e2elog.Failf("failed to %s CephFS nodeplugin with error %v", action, err)
     }

-    data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginRBAC)
+    data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePluginRBAC)
     if err != nil {
-        e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginRBAC, err)
+        e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePluginRBAC, err)
     }
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
         e2elog.Failf("failed to %s CephFS nodeplugin rbac with error %v", action, err)
     }

-    data, err = replaceNamespaceInTemplate(cephfsDirPath + cephfsNodePluginPSP)
+    data, err = replaceNamespaceInTemplate(cephFSDirPath + cephFSNodePluginPSP)
     if err != nil {
-        e2elog.Failf("failed to read content from %s with error %v", cephfsDirPath+cephfsNodePluginPSP, err)
+        e2elog.Failf("failed to read content from %s with error %v", cephFSDirPath+cephFSNodePluginPSP, err)
     }
     err = retryKubectlInput(cephCSINamespace, action, data, deployTimeout)
     if err != nil {
@@ -200,7 +200,7 @@ var _ = Describe("cephfs", func() {
            }
            deployCephfsPlugin()
        }
-       err := createConfigMap(cephfsDirPath, f.ClientSet, f)
+       err := createConfigMap(cephFSDirPath, f.ClientSet, f)
        if err != nil {
            e2elog.Failf("failed to create configmap with error %v", err)
        }
@@ -239,7 +239,7 @@ var _ = Describe("cephfs", func() {
            // log all details from the namespace where Ceph-CSI is deployed
            framework.DumpAllNamespaceInfo(c, cephCSINamespace)
        }
-       err := deleteConfigMap(cephfsDirPath)
+       err := deleteConfigMap(cephFSDirPath)
        if err != nil {
            e2elog.Failf("failed to delete configmap with error %v", err)
        }
@@ -255,7 +255,7 @@ var _ = Describe("cephfs", func() {
        if err != nil {
            e2elog.Failf("failed to delete node secret with error %v", err)
        }
-       err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+       err = deleteResource(cephFSExamplePath + "storageclass.yaml")
        if err != nil {
            e2elog.Failf("failed to delete storageclass with error %v", err)
        }
@@ -272,25 +272,25 @@ var _ = Describe("cephfs", func() {

    Context("Test CephFS CSI", func() {
        It("Test CephFS CSI", func() {
-           pvcPath := cephfsExamplePath + "pvc.yaml"
-           appPath := cephfsExamplePath + "pod.yaml"
-           pvcClonePath := cephfsExamplePath + "pvc-restore.yaml"
-           pvcSmartClonePath := cephfsExamplePath + "pvc-clone.yaml"
-           appClonePath := cephfsExamplePath + "pod-restore.yaml"
-           appSmartClonePath := cephfsExamplePath + "pod-clone.yaml"
-           snapshotPath := cephfsExamplePath + "snapshot.yaml"
+           pvcPath := cephFSExamplePath + "pvc.yaml"
+           appPath := cephFSExamplePath + "pod.yaml"
+           pvcClonePath := cephFSExamplePath + "pvc-restore.yaml"
+           pvcSmartClonePath := cephFSExamplePath + "pvc-clone.yaml"
+           appClonePath := cephFSExamplePath + "pod-restore.yaml"
+           appSmartClonePath := cephFSExamplePath + "pod-clone.yaml"
+           snapshotPath := cephFSExamplePath + "snapshot.yaml"

            By("checking provisioner deployment is running", func() {
-               err := waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
+               err := waitForDeploymentComplete(cephFSDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
                if err != nil {
-                   e2elog.Failf("timeout waiting for deployment %s with error %v", cephfsDeploymentName, err)
+                   e2elog.Failf("timeout waiting for deployment %s with error %v", cephFSDeploymentName, err)
                }
            })

            By("checking nodeplugin deamonset pods are running", func() {
-               err := waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
+               err := waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
                if err != nil {
-                   e2elog.Failf("timeout waiting for daemonset %s with error %v", cephfsDeamonSetName, err)
+                   e2elog.Failf("timeout waiting for daemonset %s with error %v", cephFSDeamonSetName, err)
                }
            })

@@ -302,11 +302,11 @@ var _ = Describe("cephfs", func() {
                    e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
                }
                // Deleting the storageclass and secret created by helm
-               err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+               err = deleteResource(cephFSExamplePath + "storageclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
                }
-               err = deleteResource(cephfsExamplePath + "secret.yaml")
+               err = deleteResource(cephFSExamplePath + "secret.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
                }
@@ -314,7 +314,7 @@ var _ = Describe("cephfs", func() {
            }

            By("check static PVC", func() {
-               scPath := cephfsExamplePath + "secret.yaml"
+               scPath := cephFSExamplePath + "secret.yaml"
                err := validateCephFsStaticPV(f, appPath, scPath)
                if err != nil {
                    e2elog.Failf("failed to validate CephFS static pv with error %v", err)
@@ -330,7 +330,7 @@ var _ = Describe("cephfs", func() {
                if err != nil {
                    e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
                }
-               err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+               err = deleteResource(cephFSExamplePath + "storageclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
                }
@@ -378,7 +378,7 @@ var _ = Describe("cephfs", func() {
                    e2elog.Failf("failed to delete PVC with error %v", err)
                }
                validateSubvolumeCount(f, 0, fileSystemName, subvolumegroup)
-               err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+               err = deleteResource(cephFSExamplePath + "storageclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete storageclass with error %v", err)
                }
@@ -399,7 +399,7 @@ var _ = Describe("cephfs", func() {
                if err != nil {
                    e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
                }
-               err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+               err = deleteResource(cephFSExamplePath + "storageclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
                }
@@ -418,7 +418,7 @@ var _ = Describe("cephfs", func() {
                if err != nil {
                    e2elog.Failf("failed to validate CephFS pvc and application binding with error %v", err)
                }
-               err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+               err = deleteResource(cephFSExamplePath + "storageclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete CephFS storageclass with error %v", err)
                }
@@ -529,7 +529,7 @@ var _ = Describe("cephfs", func() {
            })

            By("validate multiple subvolumegroup creation", func() {
-               err := deleteResource(cephfsExamplePath + "storageclass.yaml")
+               err := deleteResource(cephFSExamplePath + "storageclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete storageclass with error %v", err)
                }
@@ -538,7 +538,7 @@ var _ = Describe("cephfs", func() {
                    "clusterID-1": "subvolgrp1",
                    "clusterID-2": "subvolgrp2",
                }
-               err = createCustomConfigMap(f.ClientSet, cephfsDirPath, subvolgrpInfo)
+               err = createCustomConfigMap(f.ClientSet, cephFSDirPath, subvolgrpInfo)
                if err != nil {
                    e2elog.Failf("failed to create configmap with error %v", err)
                }
@@ -553,7 +553,7 @@ var _ = Describe("cephfs", func() {
                if err != nil {
                    e2elog.Failf("failed to validate pvc and application with error %v", err)
                }
-               err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+               err = deleteResource(cephFSExamplePath + "storageclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete storageclass with error %v", err)
                }
@@ -574,7 +574,7 @@ var _ = Describe("cephfs", func() {
                if err != nil {
                    e2elog.Failf("failed to validate pvc and application with error %v", err)
                }
-               err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+               err = deleteResource(cephFSExamplePath + "storageclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete storageclass with error %v", err)
                }
@@ -582,11 +582,11 @@ var _ = Describe("cephfs", func() {
                if err != nil {
                    e2elog.Failf("failed to validate subvolume group with error %v", err)
                }
-               err = deleteConfigMap(cephfsDirPath)
+               err = deleteConfigMap(cephFSDirPath)
                if err != nil {
                    e2elog.Failf("failed to delete configmap with error %v", err)
                }
-               err = createConfigMap(cephfsDirPath, f.ClientSet, f)
+               err = createConfigMap(cephFSDirPath, f.ClientSet, f)
                if err != nil {
                    e2elog.Failf("failed to create configmap with error %v", err)
                }
@@ -703,7 +703,7 @@ var _ = Describe("cephfs", func() {
                    e2elog.Failf("failed to delete PVC with error %v", err)
                }

-               err = deleteResource(cephfsExamplePath + "snapshotclass.yaml")
+               err = deleteResource(cephFSExamplePath + "snapshotclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete CephFS snapshotclass with error %v", err)
                }
@@ -782,7 +782,7 @@ var _ = Describe("cephfs", func() {
                    e2elog.Failf("failed to delete snapshot (%s): %v", f.UniqueName, err)
                }

-               err = deleteResource(cephfsExamplePath + "snapshotclass.yaml")
+               err = deleteResource(cephFSExamplePath + "snapshotclass.yaml")
                if err != nil {
                    e2elog.Failf("failed to delete CephFS snapshotclass with error %v", err)
                }
@@ -43,7 +43,7 @@ func createCephfsStorageClass(
    f *framework.Framework,
    enablePool bool,
    params map[string]string) error {
-   scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "storageclass.yaml")
+   scPath := fmt.Sprintf("%s/%s", cephFSExamplePath, "storageclass.yaml")
    sc, err := getStorageClass(scPath)
    if err != nil {
        return err
@@ -90,7 +90,7 @@ func createCephfsStorageClass(
 }

 func createCephfsSecret(f *framework.Framework, secretName, userName, userKey string) error {
-   scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "secret.yaml")
+   scPath := fmt.Sprintf("%s/%s", cephFSExamplePath, "secret.yaml")
    sc, err := getSecret(scPath)
    if err != nil {
        return err
@@ -131,9 +131,9 @@ func unmountCephFSVolume(f *framework.Framework, appName, pvcName string) error
    _, stdErr, err := execCommandInDaemonsetPod(
        f,
        cmd,
-       cephfsDeamonSetName,
+       cephFSDeamonSetName,
        pod.Spec.NodeName,
-       cephfsContainerName,
+       cephFSContainerName,
        cephCSINamespace)
    if stdErr != "" {
        e2elog.Logf("StdErr occurred: %s", stdErr)
@@ -18,9 +18,9 @@ func init() {
    log.SetOutput(GinkgoWriter)

    flag.IntVar(&deployTimeout, "deploy-timeout", 10, "timeout to wait for created kubernetes resources")
-   flag.BoolVar(&deployCephFS, "deploy-cephfs", true, "deploy cephfs csi driver")
+   flag.BoolVar(&deployCephFS, "deploy-cephfs", true, "deploy cephFS csi driver")
    flag.BoolVar(&deployRBD, "deploy-rbd", true, "deploy rbd csi driver")
-   flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephfs csi driver")
+   flag.BoolVar(&testCephFS, "test-cephfs", true, "test cephFS csi driver")
    flag.BoolVar(&testRBD, "test-rbd", true, "test rbd csi driver")
    flag.BoolVar(&helmTest, "helm-test", false, "tests running on deployment via helm")
    flag.BoolVar(&upgradeTesting, "upgrade-testing", false, "perform upgrade testing")
@@ -649,9 +649,9 @@ func sparsifyBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeCla
        return err
    }

-func deletePool(name string, cephfs bool, f *framework.Framework) error {
+func deletePool(name string, cephFS bool, f *framework.Framework) error {
    cmds := []string{}
-   if cephfs {
+   if cephFS {
        // ceph fs fail
        // ceph fs rm myfs --yes-i-really-mean-it
        // ceph osd pool delete myfs-metadata myfs-metadata
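The comments above outline the cephFS teardown that `deletePool` performs; a hedged shell rendering of that sequence (the `myfs-data` pool name and the confirmation flags are assumptions based on standard Ceph CLI behavior, and pool deletion additionally requires `mon_allow_pool_delete=true`):

```
ceph fs fail myfs
ceph fs rm myfs --yes-i-really-mean-it
ceph osd pool delete myfs-metadata myfs-metadata --yes-i-really-really-mean-it
ceph osd pool delete myfs-data myfs-data --yes-i-really-really-mean-it
```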
@@ -171,7 +171,7 @@ func deleteRBDSnapshotClass() error {
 }

 func createCephFSSnapshotClass(f *framework.Framework) error {
-   scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "snapshotclass.yaml")
+   scPath := fmt.Sprintf("%s/%s", cephFSExamplePath, "snapshotclass.yaml")
    sc := getSnapshotClass(scPath)
    sc.Parameters["csi.storage.k8s.io/snapshotter-secret-namespace"] = cephCSINamespace
    sc.Parameters["csi.storage.k8s.io/snapshotter-secret-name"] = cephFSProvisionerSecretName
@@ -37,7 +37,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
        appKey   = "app"
        appLabel = "cephfs-upgrade-testing"
    )
-   // deploy cephfs CSI
+   // deploy cephFS CSI
    BeforeEach(func() {
        if !upgradeTesting || !testCephFS {
            Skip("Skipping CephFS Upgrade Test")
@@ -60,7 +60,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
        if err != nil {
            e2elog.Failf("failed to upgrade csi with error %v", err)
        }
-       err = createConfigMap(cephfsDirPath, f.ClientSet, f)
+       err = createConfigMap(cephFSDirPath, f.ClientSet, f)
        if err != nil {
            e2elog.Failf("failed to create configmap with error %v", err)
        }
@@ -108,7 +108,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
            // log all details from the namespace where Ceph-CSI is deployed
            framework.DumpAllNamespaceInfo(c, cephCSINamespace)
        }
-       err = deleteConfigMap(cephfsDirPath)
+       err = deleteConfigMap(cephFSDirPath)
        if err != nil {
            e2elog.Failf("failed to delete configmap with error %v", err)
        }
@@ -124,11 +124,11 @@ var _ = Describe("CephFS Upgrade Testing", func() {
        if err != nil {
            e2elog.Failf("failed to delete node secret with error %v", err)
        }
-       err = deleteResource(cephfsExamplePath + "storageclass.yaml")
+       err = deleteResource(cephFSExamplePath + "storageclass.yaml")
        if err != nil {
            e2elog.Failf("failed to delete storageclass with error %v", err)
        }
-       err = deleteResource(cephfsExamplePath + "snapshotclass.yaml")
+       err = deleteResource(cephFSExamplePath + "snapshotclass.yaml")
        if err != nil {
            e2elog.Failf("failed to delete storageclass with error %v", err)
        }
@@ -148,22 +148,22 @@
    Context("Cephfs Upgrade Test", func() {
        It("Cephfs Upgrade Test", func() {
            By("checking provisioner deployment is running", func() {
-               err = waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
+               err = waitForDeploymentComplete(cephFSDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
                if err != nil {
-                   e2elog.Failf("timeout waiting for deployment %s with error %v", cephfsDeploymentName, err)
+                   e2elog.Failf("timeout waiting for deployment %s with error %v", cephFSDeploymentName, err)
                }
            })
            By("checking nodeplugin deamonset pods are running", func() {
-               err = waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
+               err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
                if err != nil {
-                   e2elog.Failf("timeout waiting for daemonset %s with error%v", cephfsDeamonSetName, err)
+                   e2elog.Failf("timeout waiting for daemonset %s with error%v", cephFSDeamonSetName, err)
                }
            })

            By("upgrade to latest changes and verify app re-mount", func() {
                // TODO: fetch pvc size from spec.
-               pvcPath := cephfsExamplePath + "pvc.yaml"
-               appPath := cephfsExamplePath + "pod.yaml"
+               pvcPath := cephFSExamplePath + "pvc.yaml"
+               appPath := cephFSExamplePath + "pod.yaml"
                data := "check data persists"
                label := make(map[string]string)

@@ -218,7 +218,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
            // pvc clone is only supported from v1.16+
            if k8sVersionGreaterEquals(f.ClientSet, 1, 17) {
                // Create snapshot of the pvc
-               snapshotPath := cephfsExamplePath + "snapshot.yaml"
+               snapshotPath := cephFSExamplePath + "snapshot.yaml"
                snap := getSnapshot(snapshotPath)
                snap.Name = "cephfs-pvc-snapshot"
                snap.Namespace = f.UniqueName
@@ -241,14 +241,14 @@ var _ = Describe("CephFS Upgrade Testing", func() {
                }
                deployCephfsPlugin()

-               err = waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
+               err = waitForDeploymentComplete(cephFSDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
                if err != nil {
-                   e2elog.Failf("timeout waiting for upgraded deployment %s with error %v", cephfsDeploymentName, err)
+                   e2elog.Failf("timeout waiting for upgraded deployment %s with error %v", cephFSDeploymentName, err)
                }

-               err = waitForDaemonSets(cephfsDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
+               err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
                if err != nil {
-                   e2elog.Failf("timeout waiting for upgraded daemonset %s with error %v", cephfsDeamonSetName, err)
+                   e2elog.Failf("timeout waiting for upgraded daemonset %s with error %v", cephFSDeamonSetName, err)
                }

                app.Labels = label
@@ -261,8 +261,8 @@ var _ = Describe("CephFS Upgrade Testing", func() {
            })

            By("Create clone from a snapshot", func() {
-               pvcClonePath := cephfsExamplePath + "pvc-restore.yaml"
-               appClonePath := cephfsExamplePath + "pod-restore.yaml"
+               pvcClonePath := cephFSExamplePath + "pvc-restore.yaml"
+               appClonePath := cephFSExamplePath + "pod-restore.yaml"
                label := make(map[string]string)

                // pvc clone is only supported from v1.16+
@@ -310,7 +310,7 @@ var _ = Describe("CephFS Upgrade Testing", func() {
                }

                // Delete the snapshot of the parent pvc.
-               snapshotPath := cephfsExamplePath + "snapshot.yaml"
+               snapshotPath := cephFSExamplePath + "snapshot.yaml"
                snap := getSnapshot(snapshotPath)
                snap.Name = "cephfs-pvc-snapshot"
                snap.Namespace = f.UniqueName
@@ -324,8 +324,8 @@ var _ = Describe("CephFS Upgrade Testing", func() {
            })

            By("Create clone from existing PVC", func() {
-               pvcSmartClonePath := cephfsExamplePath + "pvc-clone.yaml"
-               appSmartClonePath := cephfsExamplePath + "pod-clone.yaml"
+               pvcSmartClonePath := cephFSExamplePath + "pvc-clone.yaml"
+               appSmartClonePath := cephFSExamplePath + "pod-clone.yaml"
                label := make(map[string]string)

                // pvc clone is only supported from v1.16+
10 e2e/utils.go
@@ -422,7 +422,7 @@ func checkDataPersist(pvcPath, appPath string, f *framework.Framework) error {
    return err
 }

-func pvcDeleteWhenPoolNotFound(pvcPath string, cephfs bool, f *framework.Framework) error {
+func pvcDeleteWhenPoolNotFound(pvcPath string, cephFS bool, f *framework.Framework) error {
    pvc, err := loadPVC(pvcPath)
    if err != nil {
        return err
@@ -433,13 +433,13 @@ func pvcDeleteWhenPoolNotFound(pvcPath string, cephfs bool, f *framework.Framewo
    if err != nil {
        return err
    }
-   if cephfs {
+   if cephFS {
        err = deleteBackingCephFSVolume(f, pvc)
        if err != nil {
            return err
        }
-       // delete cephfs filesystem
-       err = deletePool("myfs", cephfs, f)
+       // delete cephFS filesystem
+       err = deletePool("myfs", cephFS, f)
        if err != nil {
            return err
        }
@@ -449,7 +449,7 @@ func pvcDeleteWhenPoolNotFound(pvcPath string, cephfs bool, f *framework.Framewo
        return err
    }
    // delete rbd pool
-   err = deletePool(defaultRBDPool, cephfs, f)
+   err = deletePool(defaultRBDPool, cephFS, f)
    if err != nil {
        return err
    }
@@ -44,7 +44,7 @@ data:
          "<MONValueN>"
        ],
        "cephFS": {
-         "subvolumeGroup": "<subvolumegroup for cephfs volumes>"
+         "subvolumeGroup": "<subvolumegroup for cephFS volumes>"
        }
      }
    ]
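For context, a hedged sketch of a complete configmap entry around the `cephFS` block above; the clusterID, monitor addresses and subvolume group value are placeholders, not part of this commit:

```
kubectl apply -f - <<EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: ceph-csi-config
data:
  config.json: |-
    [
      {
        "clusterID": "<cluster-id>",
        "monitors": ["<MONValue1>", "<MONValueN>"],
        "cephFS": {
          "subvolumeGroup": "csi"
        }
      }
    ]
EOF
```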