cleanup: resolves gofumpt issues in e2e

This commit resolves gofumpt issues in the e2e folder.

Updates: #1586

Signed-off-by: Yati Padia <ypadia@redhat.com>

parent c4372b8567
commit 7f5df7c940
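
The hunks below reduce to three mechanical rewrites: a local `var x = ...` declaration with an initializer becomes the short form `x := ...`, blank lines directly after `{` or directly before `}` are dropped, and a multiline composite literal keeps a trailing comma on its last element with the closing brace on its own line. A minimal runnable sketch of the three shapes (all names are illustrative; only the regex is taken from the oneReplicaDeployYaml hunk below):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// 1. Short declaration: this cleanup replaces
	//    `var re = regexp.MustCompile(...)` with `:=` when the type is
	//    inferred from the initializer.
	re := regexp.MustCompile(`(\s+replicas:) \d+`)
	fmt.Println(re.ReplaceAllString("  replicas: 3", `$1 1`))

	// 2. No blank line directly after `{` or directly before `}` in a block.
	for i := 0; i < 3; i++ {
		fmt.Println(i)
	}

	// 3. Multiline composite literal: trailing comma on the last element and
	//    the closing brace on its own line, which is why lines such as
	//    `"unmapOptions": "force"}, deletePolicy)` get split in two below.
	opts := map[string]string{
		"mapOptions":   "lock_on_read,queue_depth=1024",
		"unmapOptions": "force",
	}
	fmt.Println(opts)
}

Running the formatter in write mode over the package (typically `gofumpt -w e2e/`) should reproduce rewrites of this kind.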
@@ -857,7 +857,6 @@ var _ = Describe("cephfs", func() {
 			// create multiple PVC from same snapshot
 			wg.Add(totalCount)
 			for i := 0; i < totalCount; i++ {
-
 				go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
 					name := fmt.Sprintf("%s%d", f.UniqueName, n)
 					wgErrs[n] = createPVCAndApp(name, f, &p, &a, deployTimeout)
@@ -1184,8 +1183,6 @@ var _ = Describe("cephfs", func() {
 			if err != nil {
 				e2elog.Failf("failed to delete user %s with error %v", keyringCephFSNodePluginUsername, err)
 			}
-
 		})
 	})
-
 })
@@ -104,7 +104,8 @@ func createCustomConfigMap(c kubernetes.Interface, pluginPath string, subvolgrpI
 		{
 			ClusterID: clusterID[1],
 			Monitors:  mons,
-		}}
+		},
+	}
 	for i := 0; i < len(subvolgrpInfo); i++ {
 		conmap[i].CephFS.SubvolumeGroup = subvolgrpInfo[clusterID[i]]
 	}
e2e/rbd.go (19 lines changed)
@@ -914,7 +914,8 @@ var _ = Describe("RBD", func() {
 				e2elog.Failf("failed to delete storageclass with error %v", err)
 			}
 			err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, map[string]string{
-				"thickProvision": "true"}, deletePolicy)
+				"thickProvision": "true",
+			}, deletePolicy)
 			if err != nil {
 				e2elog.Failf("failed to create storageclass with error %v", err)
 			}
@@ -1117,7 +1118,6 @@ var _ = Describe("RBD", func() {
 			err = resizePVCAndValidateSize(pvcPath, appPath, f)
 			if err != nil {
 				e2elog.Failf("failed to resize filesystem PVC with error %v", err)
-
 			}
 			// validate created backend rbd images
 			validateRBDImageCount(f, 0, defaultRBDPool)
@@ -1495,7 +1495,6 @@ var _ = Describe("RBD", func() {
 			if err != nil {
 				e2elog.Failf("failed to delete pool %s with error %v", clonePool, err)
 			}
-
 		})

 		By("create ROX PVC clone and mount it to multiple pods", func() {
@@ -1863,10 +1862,9 @@ var _ = Describe("RBD", func() {
 				}
 			}()

-			var pvc = &v1.PersistentVolumeClaim{}
-			pvc, err = loadPVC(pvcPath)
-			if err != nil {
-				e2elog.Failf("failed to load PVC with error %v", err)
+			pvc, pvcErr := loadPVC(pvcPath)
+			if pvcErr != nil {
+				e2elog.Failf("failed to load PVC with error %v", pvcErr)
 			}

 			pvc.Namespace = f.UniqueName
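
This hunk is slightly more than formatting: the two-step `var pvc = &v1.PersistentVolumeClaim{}` plus `pvc, err = loadPVC(pvcPath)` collapses into one short declaration, and a dedicated `pvcErr` replaces the test-scoped `err`, apparently so the cleanup closure ending just above (note the `}()`) keeps its own view of `err`. A minimal sketch of the resulting pattern; `pvcSpec` and `loadPVC` here are hypothetical stand-ins, not copied from the repository:

package main

import (
	"errors"
	"fmt"
)

// pvcSpec and loadPVC are stand-ins for the e2e helper and its return type.
type pvcSpec struct{ Namespace string }

func loadPVC(path string) (*pvcSpec, error) {
	if path == "" {
		return nil, errors.New("empty path")
	}
	return &pvcSpec{}, nil
}

func main() {
	// One short declaration with its own error variable, instead of
	// pre-declaring pvc with var and reusing a shared err.
	pvc, pvcErr := loadPVC("pvc.yaml")
	if pvcErr != nil {
		fmt.Println("failed to load PVC:", pvcErr)
		return
	}
	pvc.Namespace = "test-ns"
	fmt.Println(pvc.Namespace)
}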
@@ -1995,7 +1993,8 @@ var _ = Describe("RBD", func() {
 				e2elog.Failf("failed to delete storageclass with error %v", err)
 			}
 			err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, map[string]string{
-				"thickProvision": "true"}, deletePolicy)
+				"thickProvision": "true",
+			}, deletePolicy)
 			if err != nil {
 				e2elog.Failf("failed to create storageclass with error %v", err)
 			}
@@ -2036,7 +2035,8 @@ var _ = Describe("RBD", func() {
 			}
 			err = createRBDStorageClass(f.ClientSet, f, defaultSCName, nil, map[string]string{
 				"mapOptions": "lock_on_read,queue_depth=1024",
-				"unmapOptions": "force"}, deletePolicy)
+				"unmapOptions": "force",
+			}, deletePolicy)
 			if err != nil {
 				e2elog.Failf("failed to create storageclass with error %v", err)
 			}
@@ -2069,7 +2069,6 @@ var _ = Describe("RBD", func() {
 			if err != nil {
 				e2elog.Failf("failed to create storageclass with error %v", err)
 			}
-
 		})

 		By("validate stale images in trash", func() {
@@ -590,7 +590,7 @@ func sparsifyBackingRBDImage(f *framework.Framework, pvc *v1.PersistentVolumeCla
 }

 func deletePool(name string, cephfs bool, f *framework.Framework) error {
-	var cmds = []string{}
+	cmds := []string{}
 	if cephfs {
 		// ceph fs fail
 		// ceph fs rm myfs --yes-i-really-mean-it
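
One aside on the deletePool change: the rewrite only normalizes declaration style, so the empty slice stays an empty (non-nil) slice. A nil slice (`var cmds []string`) would serve `append` and `len` equally well but is not interchangeable everywhere; a small sketch of the difference, independent of the ceph-csi code:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// The commit's form: short declaration of an empty, non-nil slice.
	cmds := []string{}

	// A nil slice behaves the same for append and len...
	var nilCmds []string
	cmds = append(cmds, "ceph fs fail")
	nilCmds = append(nilCmds, "ceph fs fail")
	fmt.Println(len(cmds), len(nilCmds)) // 1 1

	// ...but the two are not identical everywhere: an empty slice
	// JSON-encodes as [], a nil slice as null.
	a, _ := json.Marshal([]string{})
	b, _ := json.Marshal([]string(nil))
	fmt.Println(string(a), string(b)) // [] null
}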
@@ -147,7 +147,6 @@ var _ = Describe("CephFS Upgrade Testing", func() {

 	Context("Cephfs Upgrade Test", func() {
 		It("Cephfs Upgrade Test", func() {
-
 			By("checking provisioner deployment is running", func() {
 				err = waitForDeploymentComplete(cephfsDeploymentName, cephCSINamespace, f.ClientSet, deployTimeout)
 				if err != nil {
@@ -410,7 +409,6 @@ var _ = Describe("CephFS Upgrade Testing", func() {
 					e2elog.Failf("failed to check directory size with error %v", err)
 				}
 			}
-
 		})

 		By("delete pvc and app")
@@ -429,6 +427,5 @@ var _ = Describe("CephFS Upgrade Testing", func() {
 				e2elog.Failf("failed to delete user %s with error %v", keyringCephFSNodePluginUsername, err)
 			}
 		})
-
 	})
 })
@@ -417,7 +417,6 @@ var _ = Describe("RBD Upgrade Testing", func() {
 					e2elog.Failf("failed to check directory size with error %v", err)
 				}
 			}
-
 		})

 		By("delete pvc and app", func() {
@@ -437,6 +436,5 @@ var _ = Describe("RBD Upgrade Testing", func() {
 				e2elog.Failf("failed to delete user %s with error %v", keyringRBDNodePluginUsername, err)
 			}
 		})
-
 	})
 })
@@ -264,7 +264,8 @@ func validateNormalUserPVCAccess(pvcPath string, f *framework.Framework) error {
 					VolumeSource: v1.VolumeSource{
 						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
 							ClaimName: pvc.Name,
-							ReadOnly:  false},
+							ReadOnly:  false,
+						},
 					},
 				},
 			},
@@ -479,7 +480,7 @@ func addTopologyDomainsToDSYaml(template, labels string) string {
 }

 func oneReplicaDeployYaml(template string) string {
-	var re = regexp.MustCompile(`(\s+replicas:) \d+`)
+	re := regexp.MustCompile(`(\s+replicas:) \d+`)
 	return re.ReplaceAllString(template, `$1 1`)
 }
