mirror of
https://github.com/ceph/ceph-csi.git
synced 2024-12-23 05:20:19 +00:00
1aaccd0e5a
As we are adding the subvolume path to the PV spec, this commit adds an E2E test to verify it. Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
154 lines
4.8 KiB
Go
154 lines
4.8 KiB
Go
package e2e
|
|
|
|
import (
|
|
"context"
|
|
"encoding/json"
|
|
"fmt"
|
|
"strings"
|
|
|
|
v1 "k8s.io/api/core/v1"
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
"k8s.io/client-go/kubernetes"
|
|
"k8s.io/kubernetes/test/e2e/framework"
|
|
)
|
|
|
|
const (
	// adminUser is the Ceph admin client name; it is stored as the
	// "adminID" in the CSI secret created by createCephfsSecret.
	adminUser = "admin"
)
|
|
|
|
// validateSubvolumegroup validates whether subvolumegroup is present.
|
|
func validateSubvolumegroup(f *framework.Framework, subvolgrp string) error {
|
|
cmd := fmt.Sprintf("ceph fs subvolumegroup getpath myfs %s", subvolgrp)
|
|
stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if stdErr != "" {
|
|
return fmt.Errorf("failed to getpath for subvolumegroup %s with error %v", subvolgrp, stdErr)
|
|
}
|
|
expectedGrpPath := "/volumes/" + subvolgrp
|
|
stdOut = strings.TrimSpace(stdOut)
|
|
if stdOut != expectedGrpPath {
|
|
return fmt.Errorf("error unexpected group path. Found: %s", stdOut)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
func createCephfsStorageClass(c kubernetes.Interface, f *framework.Framework, enablePool bool, params map[string]string) error {
|
|
scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "storageclass.yaml")
|
|
sc, err := getStorageClass(scPath)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
sc.Parameters["fsName"] = "myfs"
|
|
sc.Parameters["csi.storage.k8s.io/provisioner-secret-namespace"] = rookNamespace
|
|
sc.Parameters["csi.storage.k8s.io/provisioner-secret-name"] = cephfsProvisionerSecretName
|
|
|
|
sc.Parameters["csi.storage.k8s.io/controller-expand-secret-namespace"] = rookNamespace
|
|
sc.Parameters["csi.storage.k8s.io/controller-expand-secret-name"] = cephfsProvisionerSecretName
|
|
|
|
sc.Parameters["csi.storage.k8s.io/node-stage-secret-namespace"] = rookNamespace
|
|
sc.Parameters["csi.storage.k8s.io/node-stage-secret-name"] = cephfsNodePluginSecretName
|
|
|
|
if enablePool {
|
|
sc.Parameters["pool"] = "myfs-data0"
|
|
}
|
|
|
|
// overload any parameters that were passed
|
|
if params == nil {
|
|
// create an empty params, so that params["clusterID"] below
|
|
// does not panic
|
|
params = map[string]string{}
|
|
}
|
|
for param, value := range params {
|
|
sc.Parameters[param] = value
|
|
}
|
|
|
|
// fetch and set fsID from the cluster if not set in params
|
|
if _, found := params["clusterID"]; !found {
|
|
fsID, stdErr, failErr := execCommandInToolBoxPod(f, "ceph fsid", rookNamespace)
|
|
if failErr != nil {
|
|
return failErr
|
|
}
|
|
if stdErr != "" {
|
|
return fmt.Errorf("error getting fsid %v", stdErr)
|
|
}
|
|
sc.Parameters["clusterID"] = strings.Trim(fsID, "\n")
|
|
}
|
|
sc.Namespace = cephCSINamespace
|
|
_, err = c.StorageV1().StorageClasses().Create(context.TODO(), &sc, metav1.CreateOptions{})
|
|
return err
|
|
}
|
|
|
|
func createCephfsSecret(c kubernetes.Interface, f *framework.Framework) error {
|
|
scPath := fmt.Sprintf("%s/%s", cephfsExamplePath, "secret.yaml")
|
|
sc, err := getSecret(scPath)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
adminKey, stdErr, err := execCommandInToolBoxPod(f, "ceph auth get-key client.admin", rookNamespace)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if stdErr != "" {
|
|
return fmt.Errorf("error getting admin key %v", stdErr)
|
|
}
|
|
sc.StringData["adminID"] = adminUser
|
|
sc.StringData["adminKey"] = adminKey
|
|
delete(sc.StringData, "userID")
|
|
delete(sc.StringData, "userKey")
|
|
sc.Namespace = cephCSINamespace
|
|
_, err = c.CoreV1().Secrets(cephCSINamespace).Create(context.TODO(), &sc, metav1.CreateOptions{})
|
|
return err
|
|
}
|
|
|
|
func deleteBackingCephFSVolume(f *framework.Framework, pvc *v1.PersistentVolumeClaim) error {
|
|
imageData, err := getImageInfoFromPVC(pvc.Namespace, pvc.Name, f)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
_, stdErr, err := execCommandInToolBoxPod(f, "ceph fs subvolume rm myfs "+imageData.imageName+" "+subvolumegroup, rookNamespace)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
if stdErr != "" {
|
|
return fmt.Errorf("error deleting backing volume %s %v", imageData.imageName, stdErr)
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// cephfsSubVolume holds one entry of the JSON output produced by
// "ceph fs subvolume ls --format=json" (see listCephFSSubVolumes).
type cephfsSubVolume struct {
	Name string `json:"name"`
}
|
|
|
|
func listCephFSSubVolumes(f *framework.Framework, filesystem, groupname string) ([]cephfsSubVolume, error) {
|
|
var subVols []cephfsSubVolume
|
|
stdout, stdErr, err := execCommandInToolBoxPod(f, fmt.Sprintf("ceph fs subvolume ls %s --group_name=%s --format=json", filesystem, groupname), rookNamespace)
|
|
if err != nil {
|
|
return subVols, err
|
|
}
|
|
if stdErr != "" {
|
|
return subVols, fmt.Errorf("error listing subolumes %v", stdErr)
|
|
}
|
|
|
|
err = json.Unmarshal([]byte(stdout), &subVols)
|
|
if err != nil {
|
|
return subVols, err
|
|
}
|
|
return subVols, nil
|
|
}
|
|
|
|
// getSubvolumePath returns the path of the given subvolume within the
// subvolumegroup of the filesystem, as reported by
// "ceph fs subvolume getpath" run in the toolbox pod.
func getSubvolumePath(f *framework.Framework, filesystem, subvolgrp, subvolume string) (string, error) {
	cmd := fmt.Sprintf("ceph fs subvolume getpath %s %s --group_name=%s", filesystem, subvolume, subvolgrp)
	stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
	if err != nil {
		return "", err
	}
	if stdErr != "" {
		return "", fmt.Errorf("failed to getpath for subvolume %s with error %s", subvolume, stdErr)
	}
	// strip the trailing newline printed by the command
	return strings.TrimSpace(stdOut), nil
}
|