e2e: allow passing CephFS filesystem name on CLI
A new -filesystem=... option has been added so that the e2e tests can run against environments that do not have a "myfs" CephFS filesystem.

Signed-off-by: Niels de Vos <ndevos@redhat.com>
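For illustration only: with this change, an e2e run against a cluster whose CephFS filesystem is not named "myfs" could look something like the line below. The -filesystem, -kubeconfig, -timeout and -v flags are the ones documented in the e2e README table touched by this commit; the ./e2e/ package path and the filesystem name "examplefs" are assumptions about the local setup, not part of this commit.

    go test ./e2e/ -v -timeout=30m -kubeconfig=$HOME/.kube/config -filesystem=examplefs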
parent ab5ca13586
commit 92866f46fd
@@ -101,6 +101,7 @@ are available while running tests:

 | kubeconfig | Path to kubeconfig containing embedded authinfo (default: $HOME/.kube/config) |
 | timeout | Panic test binary after duration d (default 0, timeout disabled) |
 | v | Verbose: print additional output |
+| filesystem | Name of the CephFS filesystem (default: "myfs") |

 ## E2E for snapshot
@@ -40,7 +40,7 @@ const (

 // validateSubvolumegroup validates whether subvolumegroup is present.
 func validateSubvolumegroup(f *framework.Framework, subvolgrp string) error {
-	cmd := fmt.Sprintf("ceph fs subvolumegroup getpath myfs %s", subvolgrp)
+	cmd := fmt.Sprintf("ceph fs subvolumegroup getpath %s %s", fileSystemName, subvolgrp)
 	stdOut, stdErr, err := execCommandInToolBoxPod(f, cmd, rookNamespace)
 	if err != nil {
 		return fmt.Errorf("failed to exec command in toolbox: %w", err)
@@ -67,7 +67,7 @@ func createCephfsStorageClass(
 	if err != nil {
 		return err
 	}
-	sc.Parameters["fsName"] = "myfs"
+	sc.Parameters["fsName"] = fileSystemName
 	sc.Parameters["csi.storage.k8s.io/provisioner-secret-namespace"] = cephCSINamespace
 	sc.Parameters["csi.storage.k8s.io/provisioner-secret-name"] = cephFSProvisionerSecretName
@@ -43,6 +43,7 @@ func init() {
 	flag.StringVar(&upgradeVersion, "upgrade-version", "v3.5.1", "target version for upgrade testing")
 	flag.StringVar(&cephCSINamespace, "cephcsi-namespace", defaultNs, "namespace in which cephcsi deployed")
 	flag.StringVar(&rookNamespace, "rook-namespace", "rook-ceph", "namespace in which rook is deployed")
+	flag.StringVar(&fileSystemName, "filesystem", "myfs", "CephFS filesystem to use")
 	setDefaultKubeconfig()

 	// Register framework flags, then handle flags
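This hunk registers the flag but does not show where fileSystemName is declared; presumably it is a package-level string alongside the other e2e flag variables. A minimal, self-contained sketch of that pattern (the variable declaration and the final print are assumptions for illustration; the flag.StringVar call mirrors the hunk above):

package main

import (
	"flag"
	"fmt"
)

// fileSystemName is assumed to be a package-level string, like the other
// flag targets in the e2e package (cephCSINamespace, rookNamespace, ...).
var fileSystemName string

func init() {
	// Mirrors the registration added in this commit: -filesystem, defaulting to "myfs".
	flag.StringVar(&fileSystemName, "filesystem", "myfs", "CephFS filesystem to use")
}

func main() {
	flag.Parse()
	// After flag.Parse(), helpers can build ceph CLI commands against the
	// configured filesystem instead of a hard-coded "myfs".
	fmt.Printf("ceph fs subvolumegroup getpath %s <subvolumegroup>\n", fileSystemName)
}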
@@ -331,7 +331,6 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) error {
 	var (
 		cephFsVolName = "testSubVol"
 		groupName     = "testGroup"
-		fsName        = "myfs"
 		pvName        = "pv-name"
 		pvcName       = "pvc-name"
 		namespace     = f.UniqueName
@@ -361,7 +360,7 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) error {
 	size := "4294967296"

 	// create subvolumegroup, command will work even if group is already present.
-	cmd := fmt.Sprintf("ceph fs subvolumegroup create %s %s", fsName, groupName)
+	cmd := fmt.Sprintf("ceph fs subvolumegroup create %s %s", fileSystemName, groupName)

 	_, e, err = execCommandInPod(f, cmd, rookNamespace, &listOpt)
 	if err != nil {
@@ -372,7 +371,7 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) error {
 	}

 	// create subvolume
-	cmd = fmt.Sprintf("ceph fs subvolume create %s %s %s --size %s", fsName, cephFsVolName, groupName, size)
+	cmd = fmt.Sprintf("ceph fs subvolume create %s %s %s --size %s", fileSystemName, cephFsVolName, groupName, size)
 	_, e, err = execCommandInPod(f, cmd, rookNamespace, &listOpt)
 	if err != nil {
 		return err
@@ -382,7 +381,7 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) error {
 	}

 	// get rootpath
-	cmd = fmt.Sprintf("ceph fs subvolume getpath %s %s %s", fsName, cephFsVolName, groupName)
+	cmd = fmt.Sprintf("ceph fs subvolume getpath %s %s %s", fileSystemName, cephFsVolName, groupName)
 	rootPath, e, err := execCommandInPod(f, cmd, rookNamespace, &listOpt)
 	if err != nil {
 		return err
@@ -415,7 +414,7 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) error {
 	}

 	opt["clusterID"] = fsID
-	opt["fsName"] = fsName
+	opt["fsName"] = fileSystemName
 	opt["staticVolume"] = strconv.FormatBool(true)
 	opt["rootPath"] = rootPath
 	pv := getStaticPV(
@@ -474,7 +473,7 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) error {
 	}

 	// delete subvolume
-	cmd = fmt.Sprintf("ceph fs subvolume rm %s %s %s", fsName, cephFsVolName, groupName)
+	cmd = fmt.Sprintf("ceph fs subvolume rm %s %s %s", fileSystemName, cephFsVolName, groupName)
 	_, e, err = execCommandInPod(f, cmd, rookNamespace, &listOpt)
 	if err != nil {
 		return err
@@ -484,7 +483,7 @@ func validateCephFsStaticPV(f *framework.Framework, appPath, scPath string) error {
 	}

 	// delete subvolume group
-	cmd = fmt.Sprintf("ceph fs subvolumegroup rm %s %s", fsName, groupName)
+	cmd = fmt.Sprintf("ceph fs subvolumegroup rm %s %s", fileSystemName, groupName)
 	_, e, err = execCommandInPod(f, cmd, rookNamespace, &listOpt)
 	if err != nil {
 		return err
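Taken together, the hunks above parameterize the whole subvolume lifecycle that validateCephFsStaticPV drives. As a summary, here is a small self-contained sketch that only reproduces the command strings shown in this diff; the test plumbing (execCommandInPod, error handling, PV/PVC creation) is deliberately omitted, and the literal values mirror the test's own constants:

package main

import "fmt"

func main() {
	// Values taken from the test above; fileSystemName now comes from the
	// -filesystem flag instead of a hard-coded "myfs".
	fileSystemName := "myfs"
	groupName := "testGroup"
	cephFsVolName := "testSubVol"
	size := "4294967296"

	// Subvolume lifecycle, in the order the static-PV test runs it.
	lifecycle := []string{
		fmt.Sprintf("ceph fs subvolumegroup create %s %s", fileSystemName, groupName),
		fmt.Sprintf("ceph fs subvolume create %s %s %s --size %s", fileSystemName, cephFsVolName, groupName, size),
		fmt.Sprintf("ceph fs subvolume getpath %s %s %s", fileSystemName, cephFsVolName, groupName),
		fmt.Sprintf("ceph fs subvolume rm %s %s %s", fileSystemName, cephFsVolName, groupName),
		fmt.Sprintf("ceph fs subvolumegroup rm %s %s", fileSystemName, groupName),
	}
	for _, cmd := range lifecycle {
		fmt.Println(cmd)
	}
}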
@@ -526,7 +526,7 @@ func pvcDeleteWhenPoolNotFound(pvcPath string, cephFS bool, f *framework.Framework) error {
 		return err
 	}
 	// delete cephFS filesystem
-	err = deletePool("myfs", cephFS, f)
+	err = deletePool(fileSystemName, cephFS, f)
 	if err != nil {
 		return err
 	}