Mirror of https://github.com/ceph/ceph-csi.git
e2e: cephfs rados namespace test
Signed-off-by: Praveen M <m.praveen@ibm.com>
commit 3aa91cfa6f (parent 86759d4653)
116 e2e/cephfs.go
@@ -2491,6 +2491,122 @@ var _ = Describe(cephfsType, func() {
 			}
 		})
 
+		By("verify rados objects are within a namespace", func() {
+			updateRadosNamespace := func(radosNamespaceName string) {
+				framework.Logf("updating configmap with rados namespace %s", radosNamespace)
+				radosNamespace = radosNamespaceName
+				err := deleteConfigMap(cephFSDirPath)
+				if err != nil {
+					framework.Failf("failed to delete configmap: %v", err)
+				}
+				err = createConfigMap(cephFSDirPath, f.ClientSet, f)
+				if err != nil {
+					framework.Failf("failed to create configmap: %v", err)
+				}
+
+				// delete csi pods
+				err = deletePodWithLabel("app in (ceph-csi-cephfs, csi-cephfsplugin, csi-cephfsplugin-provisioner)",
+					cephCSINamespace, false)
+				if err != nil {
+					framework.Failf("failed to delete pods with labels: %v", err)
+				}
+				// wait for csi pods to come up
+				err = waitForDaemonSets(cephFSDeamonSetName, cephCSINamespace, f.ClientSet, deployTimeout)
+				if err != nil {
+					framework.Failf("timeout waiting for daemonset pods: %v", err)
+				}
+				err = waitForDeploymentComplete(f.ClientSet, cephFSDeploymentName, cephCSINamespace, deployTimeout)
+				if err != nil {
+					framework.Failf("timeout waiting for deployment pods: %v", err)
+				}
+			}
+
+			// radosNamespace is a global variable, so we need to save the old value
+			// and restore it after the test.
+			oldRadosNamespace := radosNamespace
+			newRadosNamespace := "cephfs-ns"
+
+			updateRadosNamespace(newRadosNamespace)
+			defer func() {
+				updateRadosNamespace(oldRadosNamespace)
+			}()
+
+			err := deleteResource(cephFSExamplePath + "storageclass.yaml")
+			if err != nil {
+				framework.Failf("failed to delete CephFS storageclass: %v", err)
+			}
+			err = createCephfsStorageClass(f.ClientSet, f, true, nil)
+			if err != nil {
+				framework.Failf("failed to create CephFS storageclass: %v", err)
+			}
+			// create a PVC and bind it to an app
+			pvc, pod, err := createPVCAndAppBinding(pvcPath, appPath, f, deployTimeout)
+			if err != nil {
+				framework.Failf("failed to validate CephFS pvc and application binding: %v", err)
+			}
+
+			// snapshot test
+			err = deleteResource(cephFSExamplePath + "snapshotclass.yaml")
+			if err != nil {
+				framework.Failf("failed to delete CephFS snapshotclass: %v", err)
+			}
+			err = createCephFSSnapshotClass(f)
+			if err != nil {
+				framework.Failf("failed to create CephFS snapshot class: %v", err)
+			}
+			snap := getSnapshot(snapshotPath)
+			snap.Namespace = f.UniqueName
+			snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
+			snap.Name = f.UniqueName
+			err = createSnapshot(&snap, deployTimeout)
+			if err != nil {
+				framework.Failf("failed to create snapshot (%s): %v", snap.Name, err)
+			}
+
+			// restore pvc test
+			pvcClone, err := loadPVC(pvcClonePath)
+			if err != nil {
+				framework.Failf("failed to load PVC: %v", err)
+			}
+			pvcClone.Namespace = f.UniqueName
+			pvcClone.Spec.DataSource.Name = snap.Name
+			// create PVC from the snapshot
+			err = createPVCAndvalidatePV(f.ClientSet, pvcClone, deployTimeout)
+			if err != nil {
+				framework.Failf("failed to create pvc clone: %v", err)
+			}
+
+			// validate OMAP count
+			validateOmapCount(f, 2, cephfsType, metadataPool, volumesType)
+			validateOmapCount(f, 1, cephfsType, metadataPool, snapsType)
+
+			// delete resources
+			err = deletePod(pod.Name, pod.Namespace, f.ClientSet, deployTimeout)
+			if err != nil {
+				framework.Failf("failed to delete application: %v", err)
+			}
+			err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
+			if err != nil {
+				framework.Failf("failed to delete PVC: %v", err)
+			}
+			err = deletePVCAndValidatePV(f.ClientSet, pvcClone, deployTimeout)
+			if err != nil {
+				framework.Failf("failed to delete pvc clone: %v", err)
+			}
+			err = deleteSnapshot(&snap, deployTimeout)
+			if err != nil {
+				framework.Failf("failed to delete snapshot (%s): %v", f.UniqueName, err)
+			}
+			err = deleteResource(cephFSExamplePath + "storageclass.yaml")
+			if err != nil {
+				framework.Failf("failed to delete CephFS storageclass: %v", err)
+			}
+
+			// validate OMAP count
+			validateOmapCount(f, 0, cephfsType, metadataPool, volumesType)
+			validateOmapCount(f, 0, cephfsType, metadataPool, snapsType)
+		})
+
 		// FIXME: in case NFS testing is done, prevent deletion
 		// of the CephFS filesystem and related pool. This can
 		// probably be addressed in a nicer way, making sure
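Note: updateRadosNamespace restarts the CSI pods after rewriting the configmap, presumably because the driver reads the cluster configuration at startup. The deletePodWithLabel call corresponds roughly to kubectl -n <cephCSINamespace> delete pod -l 'app in (ceph-csi-cephfs, csi-cephfsplugin, csi-cephfsplugin-provisioner)' (a sketch; the actual namespace comes from the cephCSINamespace variable).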
@@ -187,6 +187,15 @@ func deleteBackingCephFSVolume(f *framework.Framework, pvc *v1.PersistentVolumeC
 	return nil
 }
 
+func cephfsOptions(pool string) string {
+	if radosNamespace != "" {
+		return "--pool=" + pool + " --namespace=" + radosNamespace
+	}
+
+	// default namespace is csi
+	return "--pool=" + pool + " --namespace=csi"
+}
+
 type cephfsSubVolume struct {
 	Name string `json:"name"`
 }
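As a usage sketch (not part of the commit), the helper renders the pool/namespace flags that the rados invocations in e2e/utils.go below append. The pool name "myfs-metadata" is hypothetical:

package main

import "fmt"

// radosNamespace mirrors the e2e global; "cephfs-ns" is the value the test sets.
var radosNamespace = "cephfs-ns"

// cephfsOptions is copied from the hunk above.
func cephfsOptions(pool string) string {
	if radosNamespace != "" {
		return "--pool=" + pool + " --namespace=" + radosNamespace
	}

	// default namespace is csi
	return "--pool=" + pool + " --namespace=csi"
}

func main() {
	fmt.Println("rados ls " + cephfsOptions("myfs-metadata"))
	// Prints: rados ls --pool=myfs-metadata --namespace=cephfs-ns

	radosNamespace = ""
	fmt.Println("rados ls " + cephfsOptions("myfs-metadata"))
	// Prints: rados ls --pool=myfs-metadata --namespace=csi
}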
@@ -60,6 +60,9 @@ func createConfigMap(pluginPath string, c kubernetes.Interface, f *framework.Fra
 		RBD: cephcsi.RBD{
 			RadosNamespace: radosNamespace,
 		},
+		CephFS: cephcsi.CephFS{
+			RadosNamespace: radosNamespace,
+		},
 		ReadAffinity: cephcsi.ReadAffinity{
 			Enabled: true,
 			CrushLocationLabels: []string{
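For context, createConfigMap serializes entries like the above into the csi-config-map. A minimal sketch of the resulting JSON shape, assuming struct fields and JSON tags modeled on the e2e types in the diff (the real types live in the cephcsi api package, and the clusterID/monitor values are hypothetical):

package main

import (
	"encoding/json"
	"fmt"
)

// Sketch types only; the tags here are assumptions, not confirmed by this diff.
type CephFS struct {
	RadosNamespace string `json:"radosNamespace,omitempty"`
}

type RBD struct {
	RadosNamespace string `json:"radosNamespace,omitempty"`
}

type ClusterInfo struct {
	ClusterID string   `json:"clusterID"`
	Monitors  []string `json:"monitors"`
	CephFS    CephFS   `json:"cephFS"`
	RBD       RBD      `json:"rbd"`
}

func main() {
	cluster := ClusterInfo{
		ClusterID: "my-cluster-id",           // hypothetical
		Monitors:  []string{"10.0.0.1:6789"}, // hypothetical
		CephFS:    CephFS{RadosNamespace: "cephfs-ns"},
		RBD:       RBD{RadosNamespace: "cephfs-ns"},
	}
	out, err := json.MarshalIndent([]ClusterInfo{cluster}, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}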
40 e2e/utils.go
@@ -174,13 +174,11 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
 		{
 			volumeMode: volumesType,
 			driverType: cephfsType,
-			radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool),
-			radosLsCmdFilter: fmt.Sprintf(
-				"rados ls --pool=%s --namespace csi | grep -v default | grep -v csi.volume.group. | grep -c ^csi.volume.",
-				pool),
-			radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi", pool),
-			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default --pool=%s --namespace csi | wc -l",
-				pool),
+			radosLsCmd: "rados ls " + cephfsOptions(pool),
+			radosLsCmdFilter: fmt.Sprintf("rados ls %s | grep -v default | grep -v csi.volume.group. | grep -c ^csi.volume.",
+				cephfsOptions(pool)),
+			radosLsKeysCmd: "rados listomapkeys csi.volumes.default " + cephfsOptions(pool),
+			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default %s | wc -l", cephfsOptions(pool)),
 		},
 		{
 			volumeMode: volumesType,
@@ -193,14 +191,12 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
 			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.volumes.default %s | wc -l", rbdOptions(pool)),
 		},
 		{
 			volumeMode: snapsType,
 			driverType: cephfsType,
-			radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool),
-			radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.snap.",
-				pool),
-			radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi", pool),
-			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default --pool=%s --namespace csi | wc -l",
-				pool),
+			radosLsCmd: "rados ls " + cephfsOptions(pool),
+			radosLsCmdFilter: fmt.Sprintf("rados ls %s | grep -v default | grep -c ^csi.snap.", cephfsOptions(pool)),
+			radosLsKeysCmd: "rados listomapkeys csi.snaps.default " + cephfsOptions(pool),
+			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default %s | wc -l", cephfsOptions(pool)),
 		},
 		{
 			volumeMode: snapsType,
@@ -211,14 +207,12 @@ func validateOmapCount(f *framework.Framework, count int, driver, pool, mode str
 			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.snaps.default %s | wc -l", rbdOptions(pool)),
 		},
 		{
 			volumeMode: groupSnapsType,
 			driverType: cephfsType,
-			radosLsCmd: fmt.Sprintf("rados ls --pool=%s --namespace csi", pool),
-			radosLsCmdFilter: fmt.Sprintf("rados ls --pool=%s --namespace csi | grep -v default | grep -c ^csi.volume.group.",
-				pool),
-			radosLsKeysCmd: fmt.Sprintf("rados listomapkeys csi.groups.default --pool=%s --namespace csi", pool),
-			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.groups.default --pool=%s --namespace csi | wc -l",
-				pool),
+			radosLsCmd: "rados ls " + cephfsOptions(pool),
+			radosLsCmdFilter: fmt.Sprintf("rados ls %s | grep -v default | grep -c ^csi.volume.group.", cephfsOptions(pool)),
+			radosLsKeysCmd: "rados listomapkeys csi.groups.default " + cephfsOptions(pool),
+			radosLsKeysCmdFilter: fmt.Sprintf("rados listomapkeys csi.groups.default %s | wc -l", cephfsOptions(pool)),
 		},
 	}
 
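As a worked example of the rewritten filters: with radosNamespace set to "cephfs-ns" and a hypothetical metadata pool "myfs-metadata", the volumes radosLsCmdFilter expands to

rados ls --pool=myfs-metadata --namespace=cephfs-ns | grep -v default | grep -v csi.volume.group. | grep -c ^csi.volume.

and the corresponding OMAP key count check to

rados listomapkeys csi.volumes.default --pool=myfs-metadata --namespace=cephfs-ns | wc -l

With radosNamespace unset, cephfsOptions falls back to --namespace=csi, preserving the old behavior.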