mirror of
https://github.com/ceph/ceph-csi.git
synced 2024-12-18 11:00:25 +00:00
e2e: Add e2e test cases for rbd rados namespace
These test cases will be executed against a rados namespace. - Create a PVC and bind it to an app. - Resize block PVC and check device size. - Create a PVC clone and bind it to an app. Signed-off-by: Mehdy Khoshnoody <mehdy.khoshnoody@gmail.com>
This commit is contained in:
parent
3081eabba5
commit
204487383d
94
e2e/rbd.go
94
e2e/rbd.go
@ -958,6 +958,100 @@ var _ = Describe("RBD", func() {
|
||||
}
|
||||
})
|
||||
|
||||
By("ensuring all operations will work within a rados namespace", func() {
	// updateConfigMap points the CSI driver at the given rados
	// namespace: it rewrites the ceph-csi ConfigMap, ensures the
	// namespace exists in the backend pools, and restarts the CSI
	// pods so they pick up the new configuration.
	updateConfigMap := func(radosNS string) {
		radosNamespace = radosNS
		deleteConfigMap(rbdDirPath)
		createConfigMap(rbdDirPath, f.ClientSet, f)
		createRadosNamespace(f)

		// delete csi pods
		err := deletePodWithLabel("app in (ceph-csi-rbd, csi-rbdplugin, csi-rbdplugin-provisioner)",
			cephCSINamespace, false)
		if err != nil {
			Fail(err.Error())
		}
		// wait for csi pods to come up
		err = waitForDaemonSets(rbdDaemonsetName, cephCSINamespace, f.ClientSet, deployTimeout)
		if err != nil {
			Fail(err.Error())
		}
		err = waitForDeploymentComplete(rbdDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
		if err != nil {
			Fail(err.Error())
		}
	}

	updateConfigMap("e2e-ns")

	// Create a PVC and bind it to an app within the namespace
	validatePVCAndAppBinding(pvcPath, appPath, f)

	v, err := f.ClientSet.Discovery().ServerVersion()
	if err != nil {
		e2elog.Logf("failed to get server version with error %v", err)
		Fail(err.Error())
	}

	// Resize block PVC and check device size within the namespace.
	// Block PVC resize is supported in kubernetes 1.16+.
	// NOTE(review): Major/Minor are strings, so this comparison is
	// lexicographic and misorders e.g. "9" vs "16"; fine for current
	// 1.1x clusters but should become a numeric version check.
	if v.Major > "1" || (v.Major == "1" && v.Minor >= "16") {
		err = resizePVCAndValidateSize(rawPvcPath, rawAppPath, f)
		if err != nil {
			e2elog.Logf("failed to resize block PVC %v", err)
			Fail(err.Error())
		}
	}

	// Create a PVC clone and bind it to an app within the namespace.
	// snapshot beta is only supported from v1.17+.
	if v.Major > "1" || (v.Major == "1" && v.Minor >= "17") {
		pvc, err := loadPVC(pvcPath)
		if err != nil {
			Fail(err.Error())
		}

		pvc.Namespace = f.UniqueName
		e2elog.Logf("The PVC template %+v", pvc)
		err = createPVCAndvalidatePV(f.ClientSet, pvc, deployTimeout)
		if err != nil {
			Fail(err.Error())
		}
		// validate created backend rbd images
		images := listRBDImages(f)
		if len(images) != 1 {
			e2elog.Logf("backend image count %d expected image count %d", len(images), 1)
			Fail("validate backend image failed")
		}
		snap := getSnapshot(snapshotPath)
		snap.Namespace = f.UniqueName
		snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
		err = createSnapshot(&snap, deployTimeout)
		if err != nil {
			Fail(err.Error())
		}
		// taking the snapshot is expected to add exactly one backend
		// image on top of the count observed before it
		expectedImages := len(images) + 1
		images = listRBDImages(f)
		if len(images) != expectedImages {
			// fix: log the computed expected count instead of a
			// hard-coded 2
			e2elog.Logf("backend images not matching kubernetes resource count,image count %d kubernetes resource count %d", len(images), expectedImages)
			Fail("validate backend images failed")
		}

		validatePVCAndAppBinding(pvcClonePath, appClonePath, f)

		err = deleteSnapshot(&snap, deployTimeout)
		if err != nil {
			Fail(err.Error())
		}
		err = deletePVCAndValidatePV(f.ClientSet, pvc, deployTimeout)
		if err != nil {
			Fail(err.Error())
		}
	}

	// switch back to the default (empty) rados namespace
	updateConfigMap("")
})
|
||||
|
||||
By("Mount pvc as readonly in pod", func() {
|
||||
// create pvc and bind it to an app
|
||||
pvc, err := loadPVC(pvcPath)
|
||||
|
@ -99,7 +99,7 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) e
|
||||
fsID = strings.Trim(fsID, "\n")
|
||||
size := "4Gi"
|
||||
// create rbd image
|
||||
cmd := fmt.Sprintf("rbd create %s --size=%d --pool=%s --image-feature=layering", rbdImageName, 4096, defaultRBDPool)
|
||||
cmd := fmt.Sprintf("rbd create %s --size=%d --image-feature=layering %s", rbdImageName, 4096, rbdOptions(defaultRBDPool))
|
||||
|
||||
_, e = execCommandInToolBoxPod(f, cmd, rookNamespace)
|
||||
if e != "" {
|
||||
@ -109,6 +109,9 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) e
|
||||
opt["imageFeatures"] = "layering"
|
||||
opt["pool"] = defaultRBDPool
|
||||
opt["staticVolume"] = "true"
|
||||
if radosNamespace != "" {
|
||||
opt["radosNamespace"] = radosNamespace
|
||||
}
|
||||
|
||||
pv := getStaticPV(pvName, rbdImageName, size, "csi-rbd-secret", cephCSINamespace, sc, "rbd.csi.ceph.com", isBlock, opt)
|
||||
|
||||
@ -151,7 +154,7 @@ func validateRBDStaticPV(f *framework.Framework, appPath string, isBlock bool) e
|
||||
return err
|
||||
}
|
||||
|
||||
cmd = fmt.Sprintf("rbd rm %s --pool=%s", rbdImageName, defaultRBDPool)
|
||||
cmd = fmt.Sprintf("rbd rm %s %s", rbdImageName, rbdOptions(defaultRBDPool))
|
||||
execCommandInToolBoxPod(f, cmd, rookNamespace)
|
||||
return nil
|
||||
}
|
||||
|
73
e2e/utils.go
73
e2e/utils.go
@ -59,6 +59,7 @@ var (
|
||||
upgradeVersion string
|
||||
cephCSINamespace string
|
||||
rookNamespace string
|
||||
radosNamespace string
|
||||
ns string
|
||||
vaultAddr string
|
||||
poll = 2 * time.Second
|
||||
@ -330,6 +331,25 @@ func createRBDStorageClass(c kubernetes.Interface, f *framework.Framework, scOpt
|
||||
Expect(err).Should(BeNil())
|
||||
}
|
||||
|
||||
func createRadosNamespace(f *framework.Framework) {
|
||||
stdOut, stdErr := execCommandInToolBoxPod(f,
|
||||
fmt.Sprintf("rbd namespace ls --pool=%s", defaultRBDPool), rookNamespace)
|
||||
Expect(stdErr).Should(BeEmpty())
|
||||
if !strings.Contains(stdOut, radosNamespace) {
|
||||
_, stdErr = execCommandInToolBoxPod(f,
|
||||
fmt.Sprintf("rbd namespace create %s", rbdOptions(defaultRBDPool)), rookNamespace)
|
||||
Expect(stdErr).Should(BeEmpty())
|
||||
}
|
||||
stdOut, stdErr = execCommandInToolBoxPod(f,
|
||||
fmt.Sprintf("rbd namespace ls --pool=%s", rbdTopologyPool), rookNamespace)
|
||||
Expect(stdErr).Should(BeEmpty())
|
||||
if !strings.Contains(stdOut, radosNamespace) {
|
||||
_, stdErr = execCommandInToolBoxPod(f,
|
||||
fmt.Sprintf("rbd namespace create %s", rbdOptions(rbdTopologyPool)), rookNamespace)
|
||||
Expect(stdErr).Should(BeEmpty())
|
||||
}
|
||||
}
|
||||
|
||||
func deleteConfigMap(pluginPath string) {
|
||||
path := pluginPath + configMap
|
||||
_, err := framework.RunKubectl(cephCSINamespace, "delete", "-f", path, ns)
|
||||
@ -351,8 +371,9 @@ func createConfigMap(pluginPath string, c kubernetes.Interface, f *framework.Fra
|
||||
// get mon list
|
||||
mons := getMons(rookNamespace, c)
|
||||
conmap := []util.ClusterInfo{{
|
||||
ClusterID: fsID,
|
||||
Monitors: mons,
|
||||
ClusterID: fsID,
|
||||
Monitors: mons,
|
||||
RadosNamespace: radosNamespace,
|
||||
}}
|
||||
if upgradeTesting {
|
||||
subvolumegroup = "csi"
|
||||
@ -780,7 +801,7 @@ func validateEncryptedPVCAndAppBinding(pvcPath, appPath, kms string, f *framewor
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
}
|
||||
rbdImageSpec := fmt.Sprintf("%s/%s", defaultRBDPool, imageData.imageName)
|
||||
rbdImageSpec := imageSpec(defaultRBDPool, imageData.imageName)
|
||||
encryptedState, err := getImageMeta(rbdImageSpec, ".rbd.csi.ceph.com/encrypted", f)
|
||||
if err != nil {
|
||||
Fail(err.Error())
|
||||
@ -916,7 +937,8 @@ func deleteBackingCephFSVolume(f *framework.Framework, pvc *v1.PersistentVolumeC
|
||||
}
|
||||
|
||||
func listRBDImages(f *framework.Framework) []string {
|
||||
stdout, stdErr := execCommandInToolBoxPod(f, fmt.Sprintf("rbd ls --pool=%s --format=json", defaultRBDPool), rookNamespace)
|
||||
stdout, stdErr := execCommandInToolBoxPod(f,
|
||||
fmt.Sprintf("rbd ls --format=json %s", rbdOptions(defaultRBDPool)), rookNamespace)
|
||||
Expect(stdErr).Should(BeEmpty())
|
||||
var imgInfos []string
|
||||
|
||||
@ -1019,7 +1041,7 @@ func deleteBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeClaim
|
||||
return err
|
||||
}
|
||||
|
||||
cmd := fmt.Sprintf("rbd rm %s --pool=%s", imageData.imageName, defaultRBDPool)
|
||||
cmd := fmt.Sprintf("rbd rm %s %s", rbdOptions(defaultRBDPool), imageData.imageName)
|
||||
execCommandInToolBoxPod(f, cmd, rookNamespace)
|
||||
return nil
|
||||
}
|
||||
@ -1148,10 +1170,15 @@ func getPVCImageInfoInPool(f *framework.Framework, pvc *v1.PersistentVolumeClaim
|
||||
return "", err
|
||||
}
|
||||
|
||||
stdOut, stdErr := execCommandInToolBoxPod(f, "rbd info "+pool+"/"+imageData.imageName, rookNamespace)
|
||||
stdOut, stdErr := execCommandInToolBoxPod(f,
|
||||
fmt.Sprintf("rbd info %s", imageSpec(pool, imageData.imageName)), rookNamespace)
|
||||
Expect(stdErr).Should(BeEmpty())
|
||||
|
||||
e2elog.Logf("found image %s in pool %s", imageData.imageName, pool)
|
||||
if radosNamespace != "" {
|
||||
e2elog.Logf("found image %s in pool %s namespace %s", imageData.imageName, pool, radosNamespace)
|
||||
} else {
|
||||
e2elog.Logf("found image %s in pool %s", imageData.imageName, pool)
|
||||
}
|
||||
|
||||
return stdOut, nil
|
||||
}
|
||||
@ -1181,10 +1208,15 @@ func checkPVCImageJournalInPool(f *framework.Framework, pvc *v1.PersistentVolume
|
||||
return err
|
||||
}
|
||||
|
||||
_, stdErr := execCommandInToolBoxPod(f, "rados listomapkeys -p "+pool+" csi.volume."+imageData.imageID, rookNamespace)
|
||||
_, stdErr := execCommandInToolBoxPod(f,
|
||||
fmt.Sprintf("rados listomapkeys %s csi.volume.%s", rbdOptions(pool), imageData.imageID), rookNamespace)
|
||||
Expect(stdErr).Should(BeEmpty())
|
||||
|
||||
e2elog.Logf("found image journal %s in pool %s", "csi.volume."+imageData.imageID, pool)
|
||||
if radosNamespace != "" {
|
||||
e2elog.Logf("found image journal %s in pool %s namespace %s", "csi.volume."+imageData.imageID, pool, radosNamespace)
|
||||
} else {
|
||||
e2elog.Logf("found image journal %s in pool %s", "csi.volume."+imageData.imageID, pool)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@ -1195,10 +1227,15 @@ func checkPVCCSIJournalInPool(f *framework.Framework, pvc *v1.PersistentVolumeCl
|
||||
return err
|
||||
}
|
||||
|
||||
_, stdErr := execCommandInToolBoxPod(f, "rados getomapval -p "+pool+" csi.volumes.default csi.volume."+imageData.pvName, rookNamespace)
|
||||
_, stdErr := execCommandInToolBoxPod(f,
|
||||
fmt.Sprintf("rados getomapval %s csi.volumes.default csi.volume.%s", rbdOptions(pool), imageData.pvName), rookNamespace)
|
||||
Expect(stdErr).Should(BeEmpty())
|
||||
|
||||
e2elog.Logf("found CSI journal entry %s in pool %s", "csi.volume."+imageData.pvName, pool)
|
||||
if radosNamespace != "" {
|
||||
e2elog.Logf("found CSI journal entry %s in pool %s namespace %s", "csi.volume."+imageData.pvName, pool, radosNamespace)
|
||||
} else {
|
||||
e2elog.Logf("found CSI journal entry %s in pool %s", "csi.volume."+imageData.pvName, pool)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@ -1307,3 +1344,17 @@ func validateSubvolumegroup(f *framework.Framework, subvolgrp string) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func imageSpec(pool, image string) string {
|
||||
if radosNamespace != "" {
|
||||
return pool + "/" + radosNamespace + "/" + image
|
||||
}
|
||||
return pool + "/" + image
|
||||
}
|
||||
|
||||
func rbdOptions(pool string) string {
|
||||
if radosNamespace != "" {
|
||||
return "--pool=" + pool + " --namespace " + radosNamespace
|
||||
}
|
||||
return "--pool=" + pool
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user