cephfs: Add support for multiple subvolumegroups
With the current code base, the subvolumegroup is created only once, and subvolumegroup creation is not allowed again, even for a different cluster. Add support for creating multiple subvolumegroups by validating that one subvolumegroup is created per cluster.

Fixes: #1123

Signed-off-by: Yug Gupta <ygupta@redhat.com>
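For context: the per-cluster mapping exercised by this commit lives under the config.json key of the CSI ConfigMap, where each clusterID entry may carry its own cephFS subvolumegroup. Below is a minimal, self-contained Go sketch of that payload; the local clusterInfo type mirrors the util.ClusterInfo fields this diff touches, and the JSON tags and monitor address are assumptions for illustration, not taken from the commit:

package main

import (
	"encoding/json"
	"fmt"
)

// clusterInfo mirrors the fields of util.ClusterInfo that this commit
// relies on; it is defined locally so the sketch compiles on its own.
type clusterInfo struct {
	ClusterID string   `json:"clusterID"`
	Monitors  []string `json:"monitors"`
	CephFS    struct {
		SubvolumeGroup string `json:"subvolumeGroup"`
	} `json:"cephFS"`
}

func main() {
	// Two clusters, each mapped to its own subvolumegroup, matching the
	// e2e test below. The monitor address is a placeholder.
	clusters := []clusterInfo{
		{ClusterID: "clusterID-1", Monitors: []string{"192.168.0.1:6789"}},
		{ClusterID: "clusterID-2", Monitors: []string{"192.168.0.1:6789"}},
	}
	clusters[0].CephFS.SubvolumeGroup = "subvolgrp1"
	clusters[1].CephFS.SubvolumeGroup = "subvolgrp2"

	// This is the shape of the payload the test stores under the
	// config.json key of the CSI ConfigMap.
	out, err := json.MarshalIndent(clusters, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}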
e2e/cephfs.go:

@@ -176,12 +176,12 @@ var _ = Describe("cephfs", func() {
 		})

 		By("create a storage class with pool and a PVC then Bind it to an app", func() {
-			createCephfsStorageClass(f.ClientSet, f, true)
+			createCephfsStorageClass(f.ClientSet, f, true, "")
 			validatePVCAndAppBinding(pvcPath, appPath, f)
 			deleteResource(cephfsExamplePath + "storageclass.yaml")
 		})

-		createCephfsStorageClass(f.ClientSet, f, false)
+		createCephfsStorageClass(f.ClientSet, f, false, "")

 		By("create and delete a PVC", func() {
 			By("create a PVC and Bind it to an app", func() {
@@ -258,6 +258,38 @@ var _ = Describe("cephfs", func() {
 			}
 		})

+		By("validate multiple subvolumegroup creation", func() {
+			deleteResource(cephfsExamplePath + "storageclass.yaml")
+			// re-define configmap with information of multiple clusters.
+			subvolgrpInfo := map[string]string{
+				"clusterID-1": "subvolgrp1",
+				"clusterID-2": "subvolgrp2",
+			}
+			createCustomConfigMap(f.ClientSet, cephfsDirPath, subvolgrpInfo)
+			createCephfsStorageClass(f.ClientSet, f, false, "clusterID-1")
+			validatePVCAndAppBinding(pvcPath, appPath, f)
+			deleteResource(cephfsExamplePath + "storageclass.yaml")
+			// verify subvolumegroup creation.
+			err := validateSubvolumegroup(f, "subvolgrp1")
+			if err != nil {
+				Fail(err.Error())
+			}
+
+			// create resources and verify subvolume group creation
+			// for the second cluster.
+			createCephfsStorageClass(f.ClientSet, f, false, "clusterID-2")
+			validatePVCAndAppBinding(pvcPath, appPath, f)
+			deleteResource(cephfsExamplePath + "storageclass.yaml")
+			err = validateSubvolumegroup(f, "subvolgrp2")
+			if err != nil {
+				Fail(err.Error())
+			}
+			deleteConfigMap(cephfsDirPath)
+		})
+
+		createConfigMap(cephfsDirPath, f.ClientSet, f)
+		createCephfsStorageClass(f.ClientSet, f, false, "")
+
 		By("Resize PVC and check application directory size", func() {
 			v, err := f.ClientSet.Discovery().ServerVersion()
 			if err != nil {
e2e/utils.go (57 changed lines):
@@ -258,7 +258,7 @@ func getStorageClass(path string) scv1.StorageClass {
 	return sc
 }

-func createCephfsStorageClass(c kubernetes.Interface, f *framework.Framework, enablePool bool) {
+func createCephfsStorageClass(c kubernetes.Interface, f *framework.Framework, enablePool bool, clusterID string) {
 	scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "storageclass.yaml")
 	sc := getStorageClass(scPath)
 	sc.Parameters["fsName"] = "myfs"
@@ -278,6 +278,10 @@ func createCephfsStorageClass(c kubernetes.Interface, f *framework.Framework, en
 	Expect(stdErr).Should(BeEmpty())
 	// remove new line present in fsID
 	fsID = strings.Trim(fsID, "\n")
+
+	if clusterID != "" {
+		fsID = clusterID
+	}
 	sc.Namespace = cephCSINamespace
 	sc.Parameters["clusterID"] = fsID
 	_, err := c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
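A short usage sketch for the extended helper (both call forms appear in the e2e diff above): an empty clusterID keeps the previous behaviour of using the cluster's real fsid as the storage class clusterID parameter, while a non-empty value swaps in a named ConfigMap entry:

// Default behaviour: the storage class's clusterID parameter is set to
// the fsid queried from the Ceph cluster.
createCephfsStorageClass(f.ClientSet, f, false, "")

// Override: bind the storage class to the "clusterID-1" ConfigMap
// entry, whose cephFS subvolumegroup is "subvolgrp1".
createCephfsStorageClass(f.ClientSet, f, false, "clusterID-1")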
@@ -1207,3 +1211,54 @@ func addTopologyDomainsToDSYaml(template, labels string) string {
 	return strings.ReplaceAll(template, "# - \"--domainlabels=failure-domain/region,failure-domain/zone\"",
 		"- \"--domainlabels="+labels+"\"")
 }
+
+// createCustomConfigMap provides multiple clusters information.
+func createCustomConfigMap(c kubernetes.Interface, pluginPath string, subvolgrpInfo map[string]string) {
+	path := pluginPath + configMap
+	cm := v1.ConfigMap{}
+	err := unmarshal(path, &cm)
+	Expect(err).Should(BeNil())
+
+	// get mon list
+	mons := getMons(rookNamespace, c)
+	// get clusterIDs
+	var clusterID []string
+	for key := range subvolgrpInfo {
+		clusterID = append(clusterID, key)
+	}
+	conmap := []util.ClusterInfo{
+		{
+			ClusterID: clusterID[0],
+			Monitors:  mons,
+		},
+		{
+			ClusterID: clusterID[1],
+			Monitors:  mons,
+		}}
+	for i := 0; i < len(subvolgrpInfo); i++ {
+		conmap[i].CephFS.SubvolumeGroup = subvolgrpInfo[clusterID[i]]
+	}
+	data, err := json.Marshal(conmap)
+	Expect(err).Should(BeNil())
+	cm.Data["config.json"] = string(data)
+	cm.Namespace = cephCSINamespace
+	// since a configmap is already created, update the existing configmap
+	_, updateErr := c.CoreV1().ConfigMaps(cephCSINamespace).Update(context.TODO(), &cm, metav1.UpdateOptions{})
+	Expect(updateErr).Should(BeNil())
+}
+
+// validateSubvolumegroup validates whether subvolumegroup is present.
+func validateSubvolumegroup(f *framework.Framework, subvolgrp string) error {
+	cmd := fmt.Sprintf("ceph fs subvolumegroup getpath myfs %s", subvolgrp)
+	stdOut, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
+	Expect(err).Should(BeEmpty())
+	if err != "" {
+		return fmt.Errorf("error subvolumegroup %s doesn't exist", subvolgrp)
+	}
+	expectedGrpPath := "/volumes/" + subvolgrp
+	stdOut = strings.TrimSpace(stdOut)
+	if stdOut != expectedGrpPath {
+		return fmt.Errorf("error unexpected group path. Found: %s", stdOut)
+	}
+	return nil
+}
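One subtlety in createCustomConfigMap: Go map iteration order is randomized, so clusterID[0] and clusterID[1] can swap between runs. The pairing of cluster ID and subvolumegroup still holds, because the second loop looks each group up by key (subvolgrpInfo[clusterID[i]]), but the order of entries in the generated config.json is not stable, and the slice literal hardcodes exactly two clusters. A deterministic, length-agnostic variant might look like the sketch below; it is not part of the commit, reuses mons and util.ClusterInfo from the function above, and additionally needs the "sort" import:

// Sort the cluster IDs so the generated config.json is stable across
// runs, and build one entry per cluster regardless of how many there are.
clusterIDs := make([]string, 0, len(subvolgrpInfo))
for key := range subvolgrpInfo {
	clusterIDs = append(clusterIDs, key)
}
sort.Strings(clusterIDs)

conmap := make([]util.ClusterInfo, 0, len(clusterIDs))
for _, id := range clusterIDs {
	info := util.ClusterInfo{ClusterID: id, Monitors: mons}
	info.CephFS.SubvolumeGroup = subvolgrpInfo[id]
	conmap = append(conmap, info)
}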