Merge pull request #300 from red-hat-storage/sync_us--devel

Syncing latest changes from upstream devel for ceph-csi
openshift-merge-bot[bot] committed 2024-04-30 08:06:41 +00:00 (committed by GitHub)
commit 1203c564e8
34 changed files with 255 additions and 317 deletions
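Most of the diff below is mechanical cleanup enabled by the Go 1.22 update: counted loops are rewritten in Go 1.22's range-over-int form, and the now-redundant per-iteration loop-variable copies (tt := tt, kmsID := kmsID) are dropped because Go 1.22 scopes the loop variable to each iteration. A minimal illustrative sketch of the range-over-int form (not taken from the diff):

package main

import "fmt"

func main() {
    total := 3

    // Pre-Go 1.22 style, as removed throughout this commit.
    for i := 0; i < total; i++ {
        fmt.Println("old style", i)
    }

    // Go 1.22+: ranging over an int yields 0, 1, ..., total-1.
    for i := range total {
        fmt.Println("new style", i)
    }

    // When the index is unused, it can be omitted entirely.
    for range total {
        fmt.Println("tick")
    }
}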

View File

@@ -19,7 +19,7 @@ BASE_IMAGE=quay.io/ceph/ceph:v18
 CEPH_VERSION=reef

 # standard Golang options
-GOLANG_VERSION=1.21.9
+GOLANG_VERSION=1.22.2
 GO111MODULE=on

 # commitlint version

View File

@@ -514,8 +514,6 @@ var _ = Describe(cephfsType, func() {
         }
         for kmsID, kmsConf := range kmsToTest {
-            kmsID := kmsID
-            kmsConf := kmsConf
             By("create a storageclass with pool and an encrypted PVC then bind it to an app with "+kmsID, func() {
                 scOpts := map[string]string{
                     "encrypted": "true",
@@ -876,7 +874,7 @@ var _ = Describe(cephfsType, func() {
         }
         app.Namespace = f.UniqueName
         // create PVC and app
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             name := fmt.Sprintf("%s%d", f.UniqueName, i)
             err = createPVCAndApp(name, f, pvc, app, deployTimeout)
             if err != nil {
@@ -891,7 +889,7 @@ var _ = Describe(cephfsType, func() {
         validateSubvolumeCount(f, totalCount, fileSystemName, subvolumegroup)
         validateOmapCount(f, totalCount, cephfsType, metadataPool, volumesType)
         // delete PVC and app
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             name := fmt.Sprintf("%s%d", f.UniqueName, i)
             err = deletePVCAndApp(name, f, pvc, app)
             if err != nil {
@@ -1454,7 +1452,7 @@ var _ = Describe(cephfsType, func() {
         snap.Namespace = f.UniqueName
         snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
         // create snapshot
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             go func(n int, s snapapi.VolumeSnapshot) {
                 s.Name = fmt.Sprintf("%s%d", f.UniqueName, n)
                 wgErrs[n] = createSnapshot(&s, deployTimeout)
@@ -1490,7 +1488,7 @@ var _ = Describe(cephfsType, func() {
         // create multiple PVC from same snapshot
         wg.Add(totalCount)
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
                 name := fmt.Sprintf("%s%d", f.UniqueName, n)
                 wgErrs[n] = createPVCAndApp(name, f, &p, &a, deployTimeout)
@@ -1522,7 +1520,7 @@ var _ = Describe(cephfsType, func() {
         wg.Add(totalCount)
         // delete clone and app
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
                 name := fmt.Sprintf("%s%d", f.UniqueName, n)
                 p.Spec.DataSource.Name = name
@@ -1550,7 +1548,7 @@ var _ = Describe(cephfsType, func() {
         // create clones from different snapshots and bind it to an
         // app
         wg.Add(totalCount)
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
                 name := fmt.Sprintf("%s%d", f.UniqueName, n)
                 p.Spec.DataSource.Name = name
@@ -1583,7 +1581,7 @@ var _ = Describe(cephfsType, func() {
         wg.Add(totalCount)
         // delete snapshot
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             go func(n int, s snapapi.VolumeSnapshot) {
                 s.Name = fmt.Sprintf("%s%d", f.UniqueName, n)
                 wgErrs[n] = deleteSnapshot(&s, deployTimeout)
@@ -1607,7 +1605,7 @@ var _ = Describe(cephfsType, func() {
         wg.Add(totalCount)
         // delete clone and app
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
                 name := fmt.Sprintf("%s%d", f.UniqueName, n)
                 p.Spec.DataSource.Name = name
@@ -1649,7 +1647,6 @@ var _ = Describe(cephfsType, func() {
         if testCephFSFscrypt {
             for _, kmsID := range []string{"secrets-metadata-test", "vault-test"} {
-                kmsID := kmsID
                 By("checking encrypted snapshot-backed volume with KMS "+kmsID, func() {
                     err := deleteResource(cephFSExamplePath + "storageclass.yaml")
                     if err != nil {
@@ -2234,8 +2231,6 @@ var _ = Describe(cephfsType, func() {
             "vault-test": vaultKMS,
         }
         for kmsID, kmsConf := range kmsToTest {
-            kmsID := kmsID
-            kmsConf := kmsConf
             By("create an encrypted PVC-PVC clone and bind it to an app with "+kmsID, func() {
                 err := deleteResource(cephFSExamplePath + "storageclass.yaml")
                 if err != nil {
@@ -2313,7 +2308,7 @@ var _ = Describe(cephfsType, func() {
         appClone.Namespace = f.UniqueName
         wg.Add(totalCount)
         // create clone and bind it to an app
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
                 name := fmt.Sprintf("%s%d", f.UniqueName, n)
                 wgErrs[n] = createPVCAndApp(name, f, &p, &a, deployTimeout)
@@ -2345,7 +2340,7 @@ var _ = Describe(cephfsType, func() {
         wg.Add(totalCount)
         // delete clone and app
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
                 name := fmt.Sprintf("%s%d", f.UniqueName, n)
                 p.Spec.DataSource.Name = name

View File

@@ -75,7 +75,6 @@ Error from server (AlreadyExists): error when creating "STDIN": deployments.apps
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             if got := getStdErr(tt.errString); got != tt.expected {
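Hunks like the one above recur in every test file in this commit. Before Go 1.22 the loop variable was shared across iterations, so parallel subtests had to shadow it (tt := tt) or every subtest closure could observe the final table entry; with per-iteration variables the copy is redundant. A hedged sketch of the pattern, using a hypothetical table and function:

package example

import "testing"

func double(x int) int { return x * 2 }

func TestDouble(t *testing.T) {
    t.Parallel()
    tests := []struct {
        name string
        in   int
        want int
    }{
        {name: "zero", in: 0, want: 0},
        {name: "two", in: 2, want: 4},
    }
    for _, tt := range tests {
        // Go 1.22 binds a fresh tt each iteration, so the closure passed
        // to t.Run captures the right table entry without a tt := tt copy.
        t.Run(tt.name, func(t *testing.T) {
            t.Parallel()
            if got := double(tt.in); got != tt.want {
                t.Errorf("double(%d) = %d, want %d", tt.in, got, tt.want)
            }
        })
    }
}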

View File

@@ -528,7 +528,7 @@ var _ = Describe("nfs", func() {
         }
         app.Namespace = f.UniqueName
         // create PVC and app
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             name := fmt.Sprintf("%s%d", f.UniqueName, i)
             err = createPVCAndApp(name, f, pvc, app, deployTimeout)
             if err != nil {
@@ -542,7 +542,7 @@ var _ = Describe("nfs", func() {
         validateSubvolumeCount(f, totalCount, fileSystemName, defaultSubvolumegroup)
         // delete PVC and app
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             name := fmt.Sprintf("%s%d", f.UniqueName, i)
             err = deletePVCAndApp(name, f, pvc, app)
             if err != nil {
@@ -699,7 +699,7 @@ var _ = Describe("nfs", func() {
         snap.Namespace = f.UniqueName
         snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
         // create snapshot
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             go func(n int, s snapapi.VolumeSnapshot) {
                 s.Name = fmt.Sprintf("%s%d", f.UniqueName, n)
                 wgErrs[n] = createSnapshot(&s, deployTimeout)
@@ -736,7 +736,7 @@ var _ = Describe("nfs", func() {
         // create multiple PVC from same snapshot
         wg.Add(totalCount)
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
                 name := fmt.Sprintf("%s%d", f.UniqueName, n)
                 wgErrs[n] = createPVCAndApp(name, f, &p, &a, deployTimeout)
@@ -790,7 +790,7 @@ var _ = Describe("nfs", func() {
         wg.Add(totalCount)
         // delete clone and app
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
                 name := fmt.Sprintf("%s%d", f.UniqueName, n)
                 p.Spec.DataSource.Name = name
@@ -817,7 +817,7 @@ var _ = Describe("nfs", func() {
         validateOmapCount(f, totalCount, cephfsType, metadataPool, snapsType)
         // create clones from different snapshots and bind it to an app
         wg.Add(totalCount)
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
                 name := fmt.Sprintf("%s%d", f.UniqueName, n)
                 p.Spec.DataSource.Name = name
@@ -872,7 +872,7 @@ var _ = Describe("nfs", func() {
         wg.Add(totalCount)
         // delete snapshot
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             go func(n int, s snapapi.VolumeSnapshot) {
                 s.Name = fmt.Sprintf("%s%d", f.UniqueName, n)
                 wgErrs[n] = deleteSnapshot(&s, deployTimeout)
@@ -896,7 +896,7 @@ var _ = Describe("nfs", func() {
         wg.Add(totalCount)
         // delete clone and app
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
                 name := fmt.Sprintf("%s%d", f.UniqueName, n)
                 p.Spec.DataSource.Name = name
@@ -980,7 +980,7 @@ var _ = Describe("nfs", func() {
         appClone.Labels = label
         wg.Add(totalCount)
         // create clone and bind it to an app
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
                 name := fmt.Sprintf("%s%d", f.UniqueName, n)
                 wgErrs[n] = createPVCAndApp(name, f, &p, &a, deployTimeout)
@@ -1036,7 +1036,7 @@ var _ = Describe("nfs", func() {
         wg.Add(totalCount)
         // delete clone and app
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
                 name := fmt.Sprintf("%s%d", f.UniqueName, n)
                 p.Spec.DataSource.Name = name

View File

@@ -2721,7 +2721,7 @@ var _ = Describe("RBD", func() {
         }
         app.Namespace = f.UniqueName
         // create PVC and app
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             name := fmt.Sprintf("%s%d", f.UniqueName, i)
             err := createPVCAndApp(name, f, pvc, app, deployTimeout)
             if err != nil {
@@ -2733,7 +2733,7 @@ var _ = Describe("RBD", func() {
         validateRBDImageCount(f, totalCount, defaultRBDPool)
         validateOmapCount(f, totalCount, rbdType, defaultRBDPool, volumesType)
         // delete PVC and app
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             name := fmt.Sprintf("%s%d", f.UniqueName, i)
             err := deletePVCAndApp(name, f, pvc, app)
             if err != nil {
@@ -3316,7 +3316,7 @@ var _ = Describe("RBD", func() {
         appClone.Spec.Volumes[0].PersistentVolumeClaim.ClaimName = pvcClone.Name

         // create PVC and app
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             name := fmt.Sprintf("%s%d", f.UniqueName, i)
             label := map[string]string{
                 "app": name,
@@ -3329,7 +3329,7 @@ var _ = Describe("RBD", func() {
             }
         }

-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             name := fmt.Sprintf("%s%d", f.UniqueName, i)
             opt := metav1.ListOptions{
                 LabelSelector: "app=" + name,
@@ -3348,7 +3348,7 @@ var _ = Describe("RBD", func() {
         }

         // delete app
-        for i := 0; i < totalCount; i++ {
+        for i := range totalCount {
             name := fmt.Sprintf("%s%d", f.UniqueName, i)
             appClone.Name = name
             err = deletePod(appClone.Name, appClone.Namespace, f.ClientSet, deployTimeout)
@@ -3552,7 +3552,7 @@ var _ = Describe("RBD", func() {
         // validate created backend rbd images
         validateRBDImageCount(f, 1, defaultRBDPool)
         validateOmapCount(f, 1, rbdType, defaultRBDPool, volumesType)
-        for i := 0; i < snapChainDepth; i++ {
+        for i := range snapChainDepth {
             var pvcClone, smartClonePVC *v1.PersistentVolumeClaim
             snap := getSnapshot(snapshotPath)
             snap.Name = fmt.Sprintf("%s-%d", snap.Name, i)
@@ -3722,7 +3722,7 @@ var _ = Describe("RBD", func() {
         validateRBDImageCount(f, 1, defaultRBDPool)
         validateOmapCount(f, 1, rbdType, defaultRBDPool, volumesType)

-        for i := 0; i < cloneChainDepth; i++ {
+        for i := range cloneChainDepth {
             var pvcClone *v1.PersistentVolumeClaim
             pvcClone, err = loadPVC(pvcSmartClonePath)
             if err != nil {

View File

@@ -377,7 +377,7 @@ func validateCloneInDifferentPool(f *framework.Framework, snapshotPool, cloneSc,
     snap.Spec.Source.PersistentVolumeClaimName = &pvc.Name
     // create snapshot
     wg.Add(totalCount)
-    for i := 0; i < totalCount; i++ {
+    for i := range totalCount {
         go func(n int, s snapapi.VolumeSnapshot) {
             s.Name = fmt.Sprintf("%s%d", f.UniqueName, n)
             wgErrs[n] = createSnapshot(&s, deployTimeout)
@@ -416,7 +416,7 @@ func validateCloneInDifferentPool(f *framework.Framework, snapshotPool, cloneSc,
     pvcClone.Spec.DataSource.Name = fmt.Sprintf("%s%d", f.UniqueName, 0)
     // create multiple PVCs from same snapshot
     wg.Add(totalCount)
-    for i := 0; i < totalCount; i++ {
+    for i := range totalCount {
         go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
             name := fmt.Sprintf("%s%d", f.UniqueName, n)
             wgErrs[n] = createPVCAndApp(name, f, &p, &a, deployTimeout)
@@ -440,7 +440,7 @@ func validateCloneInDifferentPool(f *framework.Framework, snapshotPool, cloneSc,
     }
     wg.Add(totalCount)
     // delete clone and app
-    for i := 0; i < totalCount; i++ {
+    for i := range totalCount {
        go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
            name := fmt.Sprintf("%s%d", f.UniqueName, n)
            p.Spec.DataSource.Name = name
@@ -464,7 +464,7 @@ func validateCloneInDifferentPool(f *framework.Framework, snapshotPool, cloneSc,
     wg.Add(totalCount)
     // delete snapshot
-    for i := 0; i < totalCount; i++ {
+    for i := range totalCount {
         go func(n int, s snapapi.VolumeSnapshot) {
             s.Name = fmt.Sprintf("%s%d", f.UniqueName, n)
             wgErrs[n] = deleteSnapshot(&s, deployTimeout)

View File

@@ -927,7 +927,7 @@ func validatePVCClone(
     appClone.Namespace = f.UniqueName
     wg.Add(totalCount)
     // create clone and bind it to an app
-    for i := 0; i < totalCount; i++ {
+    for i := range totalCount {
         go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
             name := fmt.Sprintf("%s%d", f.UniqueName, n)
             label := make(map[string]string)
@@ -1020,7 +1020,7 @@ func validatePVCClone(
     validateRBDImageCount(f, totalCloneCount, defaultRBDPool)
     wg.Add(totalCount)
     // delete clone and app
-    for i := 0; i < totalCount; i++ {
+    for i := range totalCount {
         go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
             name := fmt.Sprintf("%s%d", f.UniqueName, n)
             p.Spec.DataSource.Name = name
@@ -1131,7 +1131,7 @@ func validatePVCSnapshot(
     wg.Add(totalCount)
     // create snapshot
-    for i := 0; i < totalCount; i++ {
+    for i := range totalCount {
         go func(n int, s snapapi.VolumeSnapshot) {
             s.Name = fmt.Sprintf("%s%d", f.UniqueName, n)
             wgErrs[n] = createSnapshot(&s, deployTimeout)
@@ -1189,7 +1189,7 @@ func validatePVCSnapshot(
     // create multiple PVC from same snapshot
     wg.Add(totalCount)
-    for i := 0; i < totalCount; i++ {
+    for i := range totalCount {
         go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
             name := fmt.Sprintf("%s%d", f.UniqueName, n)
             label := make(map[string]string)
@@ -1267,7 +1267,7 @@ func validatePVCSnapshot(
     validateRBDImageCount(f, totalCloneCount, defaultRBDPool)
     wg.Add(totalCount)
     // delete clone and app
-    for i := 0; i < totalCount; i++ {
+    for i := range totalCount {
         go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
             name := fmt.Sprintf("%s%d", f.UniqueName, n)
             p.Spec.DataSource.Name = name
@@ -1294,7 +1294,7 @@ func validatePVCSnapshot(
     // create clones from different snapshots and bind it to an
     // app
     wg.Add(totalCount)
-    for i := 0; i < totalCount; i++ {
+    for i := range totalCount {
         go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
             name := fmt.Sprintf("%s%d", f.UniqueName, n)
             p.Spec.DataSource.Name = name
@@ -1334,7 +1334,7 @@ func validatePVCSnapshot(
     validateRBDImageCount(f, totalSnapCount, defaultRBDPool)
     wg.Add(totalCount)
     // delete snapshot
-    for i := 0; i < totalCount; i++ {
+    for i := range totalCount {
         go func(n int, s snapapi.VolumeSnapshot) {
             s.Name = fmt.Sprintf("%s%d", f.UniqueName, n)
             content := &snapapi.VolumeSnapshotContent{}
@@ -1388,7 +1388,7 @@ func validatePVCSnapshot(
     validateRBDImageCount(f, totalCount, defaultRBDPool)
     wg.Add(totalCount)
     // delete clone and app
-    for i := 0; i < totalCount; i++ {
+    for i := range totalCount {
         go func(n int, p v1.PersistentVolumeClaim, a v1.Pod) {
             name := fmt.Sprintf("%s%d", f.UniqueName, n)
             p.Spec.DataSource.Name = name
@@ -1814,7 +1814,7 @@ func checkExports(f *framework.Framework, clusterID, clientString string) bool {
     }
     found := false
-    for i := 0; i < len(*exportList); i++ {
+    for i := range len(*exportList) {
         export := (*exportList)[i]
         for _, client := range export.Clients {
             for _, address := range client.Addresses {
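A side note on the checkExports hunk: for i := range len(*exportList) preserves the index-based shape of the original loop, though ranging over the slice itself is equally valid and yields each element directly. An illustrative sketch (the []string element type is an assumption for the example):

package main

import "fmt"

func main() {
    exports := []string{"client-a", "client-b"}

    // Index-based form, mirroring the diff: range over the length.
    for i := range len(exports) {
        fmt.Println(i, exports[i])
    }

    // Equivalent alternative: range over the slice and get the element too.
    for i, export := range exports {
        fmt.Println(i, export)
    }
}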

go.mod
View File

@@ -1,8 +1,8 @@
 module github.com/ceph/ceph-csi

-go 1.21
+go 1.22

-toolchain go1.21.9
+toolchain go1.22.2

 require (
     github.com/IBM/keyprotect-go-client v0.12.2
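For context on this go.mod hunk: the go directive declares the minimum Go language version the module requires, while the toolchain directive (available since Go 1.21) pins the exact toolchain used to build it. After this change the resolved directives read:

module github.com/ceph/ceph-csi

go 1.22

toolchain go1.22.2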

View File

@@ -106,13 +106,12 @@ func TestControllerServer_validateCreateVolumeGroupSnapshotRequest(t *testing.T)
         },
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            err := cs.validateCreateVolumeGroupSnapshotRequest(ts.args.ctx, ts.args.req)
-            if ts.wantErr {
+            err := cs.validateCreateVolumeGroupSnapshotRequest(tt.args.ctx, tt.args.req)
+            if tt.wantErr {
                 c := status.Code(err)
-                if c != ts.code {
+                if c != tt.code {
                     t.Errorf("ControllerServer.validateVolumeGroupSnapshotRequest() error = %v, want code %v", err, c)
                 }
             }

View File

@@ -143,28 +143,27 @@ func Test_setMountOptions(t *testing.T) {
     }
     for _, tt := range tests {
-        tc := tt
-        t.Run(tc.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()

             driver := &csicommon.CSIDriver{}
-            tc.ns.DefaultNodeServer = csicommon.NewDefaultNodeServer(
+            tt.ns.DefaultNodeServer = csicommon.NewDefaultNodeServer(
                 driver, "cephfs", "", map[string]string{}, map[string]string{},
             )

-            err := tc.ns.setMountOptions(tc.mnt, tc.volOptions, volCap, tmpConfPath)
+            err := tt.ns.setMountOptions(tt.mnt, tt.volOptions, volCap, tmpConfPath)
             if err != nil {
                 t.Errorf("setMountOptions() = %v", err)
             }

-            switch tc.mnt.(type) {
+            switch tt.mnt.(type) {
             case *mounter.FuseMounter:
-                if !strings.Contains(tc.volOptions.FuseMountOptions, tc.want) {
-                    t.Errorf("Set FuseMountOptions = %v Required FuseMountOptions = %v", tc.volOptions.FuseMountOptions, tc.want)
+                if !strings.Contains(tt.volOptions.FuseMountOptions, tt.want) {
+                    t.Errorf("Set FuseMountOptions = %v Required FuseMountOptions = %v", tt.volOptions.FuseMountOptions, tt.want)
                 }
             case mounter.KernelMounter:
-                if !strings.Contains(tc.volOptions.KernelMountOptions, tc.want) {
-                    t.Errorf("Set KernelMountOptions = %v Required KernelMountOptions = %v", tc.volOptions.KernelMountOptions, tc.want)
+                if !strings.Contains(tt.volOptions.KernelMountOptions, tt.want) {
+                    t.Errorf("Set KernelMountOptions = %v Required KernelMountOptions = %v", tt.volOptions.KernelMountOptions, tt.want)
                 }
             }
         })

View File

@@ -86,12 +86,11 @@ func TestIsVolumeCreateRO(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            wantErr := IsVolumeCreateRO(newtt.caps)
-            if wantErr != newtt.isRO {
-                t.Errorf("isVolumeCreateRO() wantErr = %v, isRO %v", wantErr, newtt.isRO)
+            wantErr := IsVolumeCreateRO(tt.caps)
+            if wantErr != tt.isRO {
+                t.Errorf("isVolumeCreateRO() wantErr = %v, isRO %v", wantErr, tt.isRO)
             }
         })
     }
@@ -209,13 +208,12 @@ func TestIsShallowVolumeSupported(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
-            t.Log(newtt.args.req.GetVolumeContentSource().GetSnapshot())
-            t.Log(IsVolumeCreateRO(newtt.args.req.GetVolumeCapabilities()))
+        t.Run(tt.name, func(t *testing.T) {
+            t.Log(tt.args.req.GetVolumeContentSource().GetSnapshot())
+            t.Log(IsVolumeCreateRO(tt.args.req.GetVolumeCapabilities()))
             t.Parallel()
-            if got := IsShallowVolumeSupported(newtt.args.req); got != newtt.want {
-                t.Errorf("IsShallowVolumeSupported() = %v, want %v", got, newtt.want)
+            if got := IsShallowVolumeSupported(tt.args.req); got != tt.want {
+                t.Errorf("IsShallowVolumeSupported() = %v, want %v", got, tt.want)
             }
         })
     }

View File

@@ -43,14 +43,13 @@ func TestGetIPRange(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.cidr, func(t *testing.T) {
+        t.Run(tt.cidr, func(t *testing.T) {
             t.Parallel()
-            got, err := getIPRange(ts.cidr)
+            got, err := getIPRange(tt.cidr)
             require.NoError(t, err)

             // validate if number of IPs in the range is same as expected, if not, fail.
-            require.ElementsMatch(t, ts.expectedIPs, got)
+            require.ElementsMatch(t, tt.expectedIPs, got)
         })
     }
 }
@@ -86,20 +85,18 @@ func TestFetchIP(t *testing.T) {
     }

     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.clientInfo, func(t *testing.T) {
+        t.Run(tt.clientInfo, func(t *testing.T) {
             t.Parallel()

-            client := activeClient{Inst: ts.clientInfo}
+            client := activeClient{Inst: tt.clientInfo}
             ip, actualErr := client.fetchIP()
-            if (actualErr != nil) != ts.expectedErr {
-                t.Errorf("expected error %v but got %v", ts.expectedErr, actualErr)
+            if (actualErr != nil) != tt.expectedErr {
+                t.Errorf("expected error %v but got %v", tt.expectedErr, actualErr)
             }

-            if ip != ts.expectedIP {
-                t.Errorf("expected IP %s but got %s", ts.expectedIP, ip)
+            if ip != tt.expectedIP {
+                t.Errorf("expected IP %s but got %s", tt.expectedIP, ip)
             }
         })
     }
@@ -126,18 +123,17 @@ func TestFetchID(t *testing.T) {
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.clientInfo, func(t *testing.T) {
+        t.Run(tt.clientInfo, func(t *testing.T) {
             t.Parallel()
-            ac := &activeClient{Inst: ts.clientInfo}
+            ac := &activeClient{Inst: tt.clientInfo}
             actualID, actualErr := ac.fetchID()
-            if (actualErr != nil) != ts.expectedErr {
-                t.Errorf("expected error %v but got %v", ts.expectedErr, actualErr)
+            if (actualErr != nil) != tt.expectedErr {
+                t.Errorf("expected error %v but got %v", tt.expectedErr, actualErr)
             }
-            if actualID != ts.expectedID {
-                t.Errorf("expected ID %d but got %d", ts.expectedID, actualID)
+            if actualID != tt.expectedID {
+                t.Errorf("expected ID %d but got %d", tt.expectedID, actualID)
             }
         })
     }

View File

@@ -71,7 +71,6 @@ func TestValidateSchedulingInterval(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             err := validateSchedulingInterval(tt.interval)
@@ -147,7 +146,6 @@ func TestValidateSchedulingDetails(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             err := validateSchedulingDetails(ctx, tt.parameters)
@@ -203,7 +201,6 @@ func TestGetSchedulingDetails(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             interval, startTime := getSchedulingDetails(tt.parameters)
@@ -251,11 +248,10 @@ func TestCheckVolumeResyncStatus(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            if err := checkVolumeResyncStatus(ctx, ts.args); (err != nil) != ts.wantErr {
-                t.Errorf("checkVolumeResyncStatus() error = %v, expect error = %v", err, ts.wantErr)
+            if err := checkVolumeResyncStatus(ctx, tt.args); (err != nil) != tt.wantErr {
+                t.Errorf("checkVolumeResyncStatus() error = %v, expect error = %v", err, tt.wantErr)
             }
         })
     }
@@ -265,12 +261,12 @@ func TestCheckRemoteSiteStatus(t *testing.T) {
     t.Parallel()
     tests := []struct {
         name      string
-        args      librbd.GlobalMirrorImageStatus
+        args      *librbd.GlobalMirrorImageStatus
         wantReady bool
     }{
         {
             name: "Test a single peer in sync",
-            args: librbd.GlobalMirrorImageStatus{
+            args: &librbd.GlobalMirrorImageStatus{
                 SiteStatuses: []librbd.SiteMirrorImageStatus{
                     {
                         MirrorUUID: "remote",
@@ -283,7 +279,7 @@ func TestCheckRemoteSiteStatus(t *testing.T) {
         },
         {
             name: "Test a single peer in sync, including a local instance",
-            args: librbd.GlobalMirrorImageStatus{
+            args: &librbd.GlobalMirrorImageStatus{
                 SiteStatuses: []librbd.SiteMirrorImageStatus{
                     {
                         MirrorUUID: "remote",
@@ -301,7 +297,7 @@ func TestCheckRemoteSiteStatus(t *testing.T) {
         },
         {
             name: "Test a multiple peers in sync",
-            args: librbd.GlobalMirrorImageStatus{
+            args: &librbd.GlobalMirrorImageStatus{
                 SiteStatuses: []librbd.SiteMirrorImageStatus{
                     {
                         MirrorUUID: "remote1",
@@ -319,14 +315,14 @@ func TestCheckRemoteSiteStatus(t *testing.T) {
         },
         {
             name: "Test no remote peers",
-            args: librbd.GlobalMirrorImageStatus{
+            args: &librbd.GlobalMirrorImageStatus{
                 SiteStatuses: []librbd.SiteMirrorImageStatus{},
             },
             wantReady: false,
         },
         {
             name: "Test single peer not in sync",
-            args: librbd.GlobalMirrorImageStatus{
+            args: &librbd.GlobalMirrorImageStatus{
                 SiteStatuses: []librbd.SiteMirrorImageStatus{
                     {
                         MirrorUUID: "remote",
@@ -339,7 +335,7 @@ func TestCheckRemoteSiteStatus(t *testing.T) {
         },
         {
             name: "Test single peer not up",
-            args: librbd.GlobalMirrorImageStatus{
+            args: &librbd.GlobalMirrorImageStatus{
                 SiteStatuses: []librbd.SiteMirrorImageStatus{
                     {
                         MirrorUUID: "remote",
@@ -352,7 +348,7 @@ func TestCheckRemoteSiteStatus(t *testing.T) {
         },
         {
             name: "Test multiple peers, when first peer is not in sync",
-            args: librbd.GlobalMirrorImageStatus{
+            args: &librbd.GlobalMirrorImageStatus{
                 SiteStatuses: []librbd.SiteMirrorImageStatus{
                     {
                         MirrorUUID: "remote1",
@@ -370,7 +366,7 @@ func TestCheckRemoteSiteStatus(t *testing.T) {
         },
         {
             name: "Test multiple peers, when second peer is not up",
-            args: librbd.GlobalMirrorImageStatus{
+            args: &librbd.GlobalMirrorImageStatus{
                 SiteStatuses: []librbd.SiteMirrorImageStatus{
                     {
                         MirrorUUID: "remote1",
@@ -388,11 +384,10 @@ func TestCheckRemoteSiteStatus(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            if ready := checkRemoteSiteStatus(context.TODO(), &ts.args); ready != ts.wantReady {
-                t.Errorf("checkRemoteSiteStatus() ready = %v, expect ready = %v", ready, ts.wantReady)
+            if ready := checkRemoteSiteStatus(context.TODO(), tt.args); ready != tt.wantReady {
+                t.Errorf("checkRemoteSiteStatus() ready = %v, expect ready = %v", ready, tt.wantReady)
             }
         })
     }
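The TestCheckRemoteSiteStatus hunks above also change the table's args field from a librbd.GlobalMirrorImageStatus value to a pointer, which avoids copying the status struct into every test case and lets checkRemoteSiteStatus receive tt.args directly instead of taking the address of a shadowed copy (&ts.args). A minimal sketch of the same shape, with hypothetical status/isReady names standing in for the librbd types:

package example

import "testing"

type status struct{ ready bool }

// isReady stands in for checkRemoteSiteStatus: it accepts a pointer,
// so callers never need to take the address of a loop-variable copy.
func isReady(s *status) bool { return s.ready }

func TestIsReady(t *testing.T) {
    t.Parallel()
    tests := []struct {
        name string
        args *status // pointer field: no per-case struct copy, no &tt.args
        want bool
    }{
        {name: "ready", args: &status{ready: true}, want: true},
        {name: "not ready", args: &status{ready: false}, want: false},
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            t.Parallel()
            if got := isReady(tt.args); got != tt.want {
                t.Errorf("isReady() = %v, want %v", got, tt.want)
            }
        })
    }
}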
@@ -501,7 +496,6 @@ func TestValidateLastSyncInfo(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             teststruct, err := getLastSyncInfo(ctx, tt.description)
@@ -600,7 +594,6 @@ func TestGetGRPCError(t *testing.T) {
     }

     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             result := getGRPCError(tt.err)
@@ -656,7 +649,6 @@ func Test_timestampFromString(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             got, err := timestampFromString(tt.timestamp)

View File

@@ -299,12 +299,11 @@ func TestIsFileRWO(t *testing.T) {
     }

     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            rwoFile := IsFileRWO(newtt.caps)
-            if rwoFile != newtt.rwoFile {
-                t.Errorf("IsFileRWO() rwofile = %v, want %v", rwoFile, newtt.rwoFile)
+            rwoFile := IsFileRWO(tt.caps)
+            if rwoFile != tt.rwoFile {
+                t.Errorf("IsFileRWO() rwofile = %v, want %v", rwoFile, tt.rwoFile)
             }
         })
     }
@@ -482,15 +481,14 @@ func TestIsBlockMultiWriter(t *testing.T) {
     }

     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            multiWriter, block := IsBlockMultiWriter(newtt.caps)
-            if multiWriter != newtt.multiWriter {
-                t.Errorf("IsBlockMultiWriter() multiWriter = %v, want %v", multiWriter, newtt.multiWriter)
+            multiWriter, block := IsBlockMultiWriter(tt.caps)
+            if multiWriter != tt.multiWriter {
+                t.Errorf("IsBlockMultiWriter() multiWriter = %v, want %v", multiWriter, tt.multiWriter)
             }
-            if block != newtt.block {
-                t.Errorf("IsBlockMultiWriter block = %v, want %v", block, newtt.block)
+            if block != tt.block {
+                t.Errorf("IsBlockMultiWriter block = %v, want %v", block, tt.block)
             }
         })
     }
@@ -615,12 +613,11 @@ func TestIsReaderOnly(t *testing.T) {
     }

     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            roOnly := IsReaderOnly(newtt.caps)
-            if roOnly != newtt.roOnly {
-                t.Errorf("isReadOnly() roOnly = %v, want %v", roOnly, newtt.roOnly)
+            roOnly := IsReaderOnly(tt.caps)
+            if roOnly != tt.roOnly {
+                t.Errorf("isReadOnly() roOnly = %v, want %v", roOnly, tt.roOnly)
             }
         })
     }

View File

@@ -41,7 +41,7 @@ func TestFileChecker(t *testing.T) {
         t.Error("checker failed to start")
     }

-    for i := 0; i < 10; i++ {
+    for range 10 {
         // check health, should be healthy
         healthy, msg := checker.isHealthy()
         if !healthy || msg != nil {

View File

@@ -41,11 +41,11 @@ func TestStatChecker(t *testing.T) {
         t.Error("checker failed to start")
     }

-    for i := 0; i < 10; i++ {
+    for i := range 10 {
         // check health, should be healthy
         healthy, msg := checker.isHealthy()
         if !healthy || msg != nil {
-            t.Error("volume is unhealthy")
+            t.Errorf("volume is unhealthy after %d tries", i+1)
         }

         time.Sleep(time.Second)

View File

@@ -73,15 +73,14 @@ func TestSetConfigInt(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        currentTT := tt
-        t.Run(currentTT.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            err := setConfigInt(currentTT.args.option, currentTT.args.config, currentTT.args.key)
-            if !errors.Is(err, currentTT.err) {
-                t.Errorf("setConfigInt() error = %v, wantErr %v", err, currentTT.err)
+            err := setConfigInt(tt.args.option, tt.args.config, tt.args.key)
+            if !errors.Is(err, tt.err) {
+                t.Errorf("setConfigInt() error = %v, wantErr %v", err, tt.err)
             }
             if err != nil {
-                require.NotEqual(t, currentTT.value, currentTT.args.option)
+                require.NotEqual(t, tt.value, tt.args.option)
             }
         })
     }

View File

@@ -78,12 +78,11 @@ func Test_validateNodePublishVolumeRequest(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        currentTT := tt
-        t.Run(currentTT.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            err := validateNodePublishVolumeRequest(currentTT.args.req)
-            if (err != nil) != currentTT.wantErr {
-                t.Errorf("validateNodePublishVoluemRequest() error = %v, wantErr %v", err, currentTT.wantErr)
+            err := validateNodePublishVolumeRequest(tt.args.req)
+            if (err != nil) != tt.wantErr {
+                t.Errorf("validateNodePublishVoluemRequest() error = %v, wantErr %v", err, tt.wantErr)
             }
         })
     }
@@ -157,17 +156,16 @@ func Test_getSource(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        currentTT := tt
-        t.Run(currentTT.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            got, err := getSource(currentTT.args.volContext)
-            if (err != nil) != currentTT.wantErr {
-                t.Errorf("getSource() error = %v, wantErr %v", err, currentTT.wantErr)
+            got, err := getSource(tt.args.volContext)
+            if (err != nil) != tt.wantErr {
+                t.Errorf("getSource() error = %v, wantErr %v", err, tt.wantErr)

                 return
             }
-            if got != currentTT.want {
-                t.Errorf("getSource() = %v, want %v", got, currentTT.want)
+            if got != tt.want {
+                t.Errorf("getSource() = %v, want %v", got, tt.want)
             }
         })
     }

View File

@@ -77,11 +77,10 @@ func TestValidateStriping(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            if err := validateStriping(ts.parameters); (err != nil) != ts.wantErr {
-                t.Errorf("validateStriping() error = %v, wantErr %v", err, ts.wantErr)
+            if err := validateStriping(tt.parameters); (err != nil) != tt.wantErr {
+                t.Errorf("validateStriping() error = %v, wantErr %v", err, tt.wantErr)
             }
         })
     }

View File

@@ -76,23 +76,22 @@ func TestParseEncryptionOpts(t *testing.T) {
     }

     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.testName, func(t *testing.T) {
+        t.Run(tt.testName, func(t *testing.T) {
             t.Parallel()
             actualKMS, actualEnc, actualErr := ParseEncryptionOpts(
-                newtt.volOptions,
-                newtt.fallbackType,
+                tt.volOptions,
+                tt.fallbackType,
             )
-            if actualKMS != newtt.expectedKMS {
-                t.Errorf("Expected KMS ID: %s, but got: %s", newtt.expectedKMS, actualKMS)
+            if actualKMS != tt.expectedKMS {
+                t.Errorf("Expected KMS ID: %s, but got: %s", tt.expectedKMS, actualKMS)
             }
-            if actualEnc != newtt.expectedEnc {
-                t.Errorf("Expected Encryption Type: %v, but got: %v", newtt.expectedEnc, actualEnc)
+            if actualEnc != tt.expectedEnc {
+                t.Errorf("Expected Encryption Type: %v, but got: %v", tt.expectedEnc, actualEnc)
             }
-            if (actualErr != nil) != newtt.expectedErr {
-                t.Errorf("expected error %v but got %v", newtt.expectedErr, actualErr)
+            if (actualErr != nil) != tt.expectedErr {
+                t.Errorf("expected error %v but got %v", tt.expectedErr, actualErr)
             }
         })
     }

View File

@@ -54,12 +54,11 @@ func TestIsMigrationVolID(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            got := isMigrationVolID(newtt.args)
-            if got != newtt.migVolID {
-                t.Errorf("isMigrationVolID() = %v, want %v", got, newtt.migVolID)
+            got := isMigrationVolID(tt.args)
+            if got != tt.migVolID {
+                t.Errorf("isMigrationVolID() = %v, want %v", got, tt.migVolID)
             }
         })
     }
@@ -156,17 +155,16 @@ func TestParseMigrationVolID(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            got, err := parseMigrationVolID(newtt.args)
-            if (err != nil) != newtt.wantErr {
-                t.Errorf("ParseMigrationVolID() error = %v, wantErr %v", err, newtt.wantErr)
+            got, err := parseMigrationVolID(tt.args)
+            if (err != nil) != tt.wantErr {
+                t.Errorf("ParseMigrationVolID() error = %v, wantErr %v", err, tt.wantErr)

                 return
             }
-            if !reflect.DeepEqual(got, newtt.want) {
-                t.Errorf("ParseMigrationVolID() got = %v, want %v", got, newtt.want)
+            if !reflect.DeepEqual(got, tt.want) {
+                t.Errorf("ParseMigrationVolID() got = %v, want %v", got, tt.want)
             }
         })
     }

View File

@@ -104,11 +104,10 @@ func TestParseBoolOption(t *testing.T) {
     }

     for _, tt := range tests {
-        tc := tt
-        val := parseBoolOption(ctx, tc.scParameters, optionName, defaultValue)
-        if val != tc.expect {
+        val := parseBoolOption(ctx, tt.scParameters, optionName, defaultValue)
+        if val != tt.expect {
             t.Errorf("parseBoolOption(%v) returned: %t, expected: %t",
-                tc.scParameters, val, tc.expect)
+                tt.scParameters, val, tt.expect)
         }
     }
 }
@@ -188,15 +187,14 @@ func TestNodeServer_appendReadAffinityMapOptions(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        currentTT := tt
-        t.Run(currentTT.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             rv := &rbdVolume{
-                MapOptions: currentTT.args.mapOptions,
-                Mounter:    currentTT.args.mounter,
+                MapOptions: tt.args.mapOptions,
+                Mounter:    tt.args.mounter,
             }
-            rv.appendReadAffinityMapOptions(currentTT.args.readAffinityMapOptions)
-            require.Equal(t, currentTT.want, rv.MapOptions)
+            rv.appendReadAffinityMapOptions(tt.args.readAffinityMapOptions)
+            require.Equal(t, tt.want, rv.MapOptions)
         })
     }
 }
@@ -294,10 +292,9 @@ func TestReadAffinity_GetReadAffinityMapOptions(t *testing.T) {
     }
     for _, tt := range tests {
-        tc := tt
-        t.Run(tc.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            crushLocationMap := util.GetCrushLocationMap(tc.CLICrushLocationLabels, nodeLabels)
+            crushLocationMap := util.GetCrushLocationMap(tt.CLICrushLocationLabels, nodeLabels)
             cliReadAffinityMapOptions := util.ConstructReadAffinityMapOption(crushLocationMap)
             driver := &csicommon.CSIDriver{}
@@ -307,13 +304,13 @@ func TestReadAffinity_GetReadAffinityMapOptions(t *testing.T) {
                 ),
             }
             readAffinityMapOptions, err := util.GetReadAffinityMapOptions(
-                tmpConfPath, tc.clusterID, ns.CLIReadAffinityOptions, nodeLabels,
+                tmpConfPath, tt.clusterID, ns.CLIReadAffinityOptions, nodeLabels,
             )
             if err != nil {
                 require.Fail(t, err.Error())
             }
-            require.Equal(t, tc.want, readAffinityMapOptions)
+            require.Equal(t, tt.want, readAffinityMapOptions)
         })
     }
 }

View File

@@ -210,7 +210,7 @@ func findDeviceMappingImage(ctx context.Context, pool, namespace, image string,
 // Stat a path, if it doesn't exist, retry maxRetries times.
 func waitForPath(ctx context.Context, pool, namespace, image string, maxRetries int, useNbdDriver bool) (string, bool) {
-    for i := 0; i < maxRetries; i++ {
+    for i := range maxRetries {
         if i != 0 {
             time.Sleep(time.Second)
         }

View File

@@ -82,24 +82,23 @@ func TestParseMapOptions(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tc := tt
-        t.Run(tc.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            krbdOpts, nbdOpts, err := parseMapOptions(tc.mapOption)
-            if err != nil && !strings.Contains(err.Error(), tc.expectErr) {
+            krbdOpts, nbdOpts, err := parseMapOptions(tt.mapOption)
+            if err != nil && !strings.Contains(err.Error(), tt.expectErr) {
                 // returned error
                 t.Errorf("parseMapOptions(%s) returned error, expected: %v, got: %v",
-                    tc.mapOption, tc.expectErr, err)
+                    tt.mapOption, tt.expectErr, err)
             }
-            if krbdOpts != tc.expectKrbdOptions {
+            if krbdOpts != tt.expectKrbdOptions {
                 // unexpected krbd option error
                 t.Errorf("parseMapOptions(%s) returned unexpected krbd options, expected :%q, got: %q",
-                    tc.mapOption, tc.expectKrbdOptions, krbdOpts)
+                    tt.mapOption, tt.expectKrbdOptions, krbdOpts)
             }
-            if nbdOpts != tc.expectNbdOptions {
+            if nbdOpts != tt.expectNbdOptions {
                 // unexpected nbd option error
                 t.Errorf("parseMapOptions(%s) returned unexpected nbd options, expected: %q, got: %q",
-                    tc.mapOption, tc.expectNbdOptions, nbdOpts)
+                    tt.mapOption, tt.expectNbdOptions, nbdOpts)
             }
         })
     }

View File

@@ -233,7 +233,6 @@ func TestGetCephClientLogFileName(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             val := getCephClientLogFileName(tt.args.id, tt.args.logDir, tt.args.prefix)
@@ -250,7 +249,7 @@ func TestStrategicActionOnLogFile(t *testing.T) {
     tmpDir := t.TempDir()

     var logFile [3]string
-    for i := 0; i < 3; i++ {
+    for i := range 3 {
         f, err := os.CreateTemp(tmpDir, "rbd-*.log")
         if err != nil {
             t.Errorf("creating tempfile failed: %v", err)
@@ -289,7 +288,6 @@ func TestStrategicActionOnLogFile(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             strategicActionOnLogFile(ctx, tt.args.logStrategy, tt.args.logFile)
@@ -337,8 +335,7 @@ func TestIsKrbdFeatureSupported(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tc := tt
-        t.Run(tc.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             var err error
             krbdSupportedFeaturesAttr := "0x1"
@@ -349,12 +346,12 @@ func TestIsKrbdFeatureSupported(t *testing.T) {
             // In case /sys/bus/rbd/supported_features is absent and we are
             // not in a position to prepare krbd feature attributes,
             // isKrbdFeatureSupported returns error ErrNotExist
-            supported, err := isKrbdFeatureSupported(ctx, tc.featureName)
+            supported, err := isKrbdFeatureSupported(ctx, tt.featureName)
             if err != nil && !errors.Is(err, os.ErrNotExist) {
-                t.Errorf("isKrbdFeatureSupported(%s) returned error: %v", tc.featureName, err)
-            } else if supported != tc.isSupported {
+                t.Errorf("isKrbdFeatureSupported(%s) returned error: %v", tt.featureName, err)
+            } else if supported != tt.isSupported {
                 t.Errorf("isKrbdFeatureSupported(%s) returned supported status, expected: %t, got: %t",
-                    tc.featureName, tc.isSupported, supported)
+                    tt.featureName, tt.isSupported, supported)
             }
         })
     }
@@ -382,11 +379,10 @@ func Test_checkValidImageFeatures(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tc := tt
-        t.Run(tc.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            if got := checkValidImageFeatures(tc.imageFeatures, tc.ok); got != tc.want {
-                t.Errorf("checkValidImageFeatures() = %v, want %v", got, tc.want)
+            if got := checkValidImageFeatures(tt.imageFeatures, tt.ok); got != tt.want {
+                t.Errorf("checkValidImageFeatures() = %v, want %v", got, tt.want)
             }
         })
     }

View File

@@ -64,25 +64,24 @@ func TestExecCommandWithTimeout(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            stdout, _, err := ExecCommandWithTimeout(newtt.args.ctx,
-                newtt.args.timeout,
-                newtt.args.program,
-                newtt.args.args...)
-            if (err != nil) != newtt.wantErr {
-                t.Errorf("ExecCommandWithTimeout() error = %v, wantErr %v", err, newtt.wantErr)
+            stdout, _, err := ExecCommandWithTimeout(tt.args.ctx,
+                tt.args.timeout,
+                tt.args.program,
+                tt.args.args...)
+            if (err != nil) != tt.wantErr {
+                t.Errorf("ExecCommandWithTimeout() error = %v, wantErr %v", err, tt.wantErr)

                 return
             }
-            if newtt.wantErr && !errors.Is(err, newtt.expectedErr) {
-                t.Errorf("ExecCommandWithTimeout() error expected got = %v, want %v", err, newtt.expectedErr)
+            if tt.wantErr && !errors.Is(err, tt.expectedErr) {
+                t.Errorf("ExecCommandWithTimeout() error expected got = %v, want %v", err, tt.expectedErr)
             }
-            if stdout != newtt.stdout {
-                t.Errorf("ExecCommandWithTimeout() got = %v, want %v", stdout, newtt.stdout)
+            if stdout != tt.stdout {
+                t.Errorf("ExecCommandWithTimeout() got = %v, want %v", stdout, tt.stdout)
             }
         })
     }

View File

@@ -138,23 +138,21 @@ func TestGetClusterMappingInfo(t *testing.T) {
         },
     }
     for i, tt := range tests {
-        currentI := i
-        currentTT := tt
-        t.Run(currentTT.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            mappingConfigFile := fmt.Sprintf("%s/mapping-%d.json", mappingBasePath, currentI)
-            if len(currentTT.mappingFilecontent) != 0 {
-                err = os.WriteFile(mappingConfigFile, currentTT.mappingFilecontent, 0o600)
+            mappingConfigFile := fmt.Sprintf("%s/mapping-%d.json", mappingBasePath, i)
+            if len(tt.mappingFilecontent) != 0 {
+                err = os.WriteFile(mappingConfigFile, tt.mappingFilecontent, 0o600)
                 if err != nil {
                     t.Errorf("failed to write to %q, error = %v", mappingConfigFile, err)
                 }
             }
-            data, mErr := getClusterMappingInfo(currentTT.clusterID, mappingConfigFile)
-            if (mErr != nil) != currentTT.expectErr {
-                t.Errorf("getClusterMappingInfo() error = %v, expected Error %v", mErr, currentTT.expectErr)
+            data, mErr := getClusterMappingInfo(tt.clusterID, mappingConfigFile)
+            if (mErr != nil) != tt.expectErr {
+                t.Errorf("getClusterMappingInfo() error = %v, expected Error %v", mErr, tt.expectErr)
             }
-            if !reflect.DeepEqual(data, currentTT.expectedData) {
-                t.Errorf("getClusterMappingInfo() = %v, expected data %v", data, currentTT.expectedData)
+            if !reflect.DeepEqual(data, tt.expectedData) {
+                t.Errorf("getClusterMappingInfo() = %v, expected data %v", data, tt.expectedData)
             }
         })
     }
@@ -285,7 +283,6 @@ func TestGetMappedID(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             val := GetMappedID(tt.args.key, tt.args.value, tt.args.id)
@@ -407,7+404,6 @@ func TestFetchMappedClusterIDAndMons(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             got, got1, err := fetchMappedClusterIDAndMons(ctx, tt.args.clusterID, clusterMappingConfigFile, csiConfigFile)

View File

@@ -39,11 +39,10 @@ func TestIsMigrationSecret(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            if got := isMigrationSecret(newtt.vc); got != newtt.want {
-                t.Errorf("isMigrationSecret() = %v, want %v", got, newtt.want)
+            if got := isMigrationSecret(tt.vc); got != tt.want {
+                t.Errorf("isMigrationSecret() = %v, want %v", got, tt.want)
             }
         })
     }
@@ -83,17 +82,16 @@ func TestParseAndSetSecretMapFromMigSecret(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            got, err := ParseAndSetSecretMapFromMigSecret(newtt.secretmap)
-            if (err != nil) != newtt.wantErr {
-                t.Errorf("ParseAndSetSecretMapFromMigSecret() error = %v, wantErr %v", err, newtt.wantErr)
+            got, err := ParseAndSetSecretMapFromMigSecret(tt.secretmap)
+            if (err != nil) != tt.wantErr {
+                t.Errorf("ParseAndSetSecretMapFromMigSecret() error = %v, wantErr %v", err, tt.wantErr)

                 return
             }
-            if !reflect.DeepEqual(got, newtt.want) {
-                t.Errorf("ParseAndSetSecretMapFromMigSecret() got = %v, want %v", got, newtt.want)
+            if !reflect.DeepEqual(got, tt.want) {
+                t.Errorf("ParseAndSetSecretMapFromMigSecret() got = %v, want %v", got, tt.want)
             }
         })
     }

View File

@@ -102,12 +102,11 @@ func Test_getCrushLocationMap(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		currentTT := tt
-		t.Run(currentTT.name, func(t *testing.T) {
+		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
 			require.Equal(t,
-				currentTT.want,
-				getCrushLocationMap(currentTT.args.crushLocationLabels, currentTT.args.nodeLabels))
+				tt.want,
+				getCrushLocationMap(tt.args.crushLocationLabels, tt.args.nodeLabels))
 		})
 	}
 }

View File

@@ -199,17 +199,16 @@ func TestGetRBDNetNamespaceFilePath(t *testing.T) {
 		t.Errorf("failed to write %s file content: %v", CsiConfigFile, err)
 	}
 	for _, tt := range tests {
-		ts := tt
-		t.Run(ts.name, func(t *testing.T) {
+		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
-			got, err := GetRBDNetNamespaceFilePath(tmpConfPath, ts.clusterID)
+			got, err := GetRBDNetNamespaceFilePath(tmpConfPath, tt.clusterID)
 			if err != nil {
 				t.Errorf("GetRBDNetNamespaceFilePath() error = %v", err)

 				return
 			}
-			if got != ts.want {
-				t.Errorf("GetRBDNetNamespaceFilePath() = %v, want %v", got, ts.want)
+			if got != tt.want {
+				t.Errorf("GetRBDNetNamespaceFilePath() = %v, want %v", got, tt.want)
 			}
 		})
 	}
@@ -269,17 +268,16 @@ func TestGetCephFSNetNamespaceFilePath(t *testing.T) {
 		t.Errorf("failed to write %s file content: %v", CsiConfigFile, err)
 	}
 	for _, tt := range tests {
-		ts := tt
-		t.Run(ts.name, func(t *testing.T) {
+		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
-			got, err := GetCephFSNetNamespaceFilePath(tmpConfPath, ts.clusterID)
+			got, err := GetCephFSNetNamespaceFilePath(tmpConfPath, tt.clusterID)
 			if err != nil {
 				t.Errorf("GetCephFSNetNamespaceFilePath() error = %v", err)

 				return
 			}
-			if got != ts.want {
-				t.Errorf("GetCephFSNetNamespaceFilePath() = %v, want %v", got, ts.want)
+			if got != tt.want {
+				t.Errorf("GetCephFSNetNamespaceFilePath() = %v, want %v", got, tt.want)
 			}
 		})
 	}
@@ -339,17 +337,16 @@ func TestGetNFSNetNamespaceFilePath(t *testing.T) {
 		t.Errorf("failed to write %s file content: %v", CsiConfigFile, err)
 	}
 	for _, tt := range tests {
-		ts := tt
-		t.Run(ts.name, func(t *testing.T) {
+		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
-			got, err := GetNFSNetNamespaceFilePath(tmpConfPath, ts.clusterID)
+			got, err := GetNFSNetNamespaceFilePath(tmpConfPath, tt.clusterID)
 			if err != nil {
 				t.Errorf("GetNFSNetNamespaceFilePath() error = %v", err)

 				return
 			}
-			if got != ts.want {
-				t.Errorf("GetNFSNetNamespaceFilePath() = %v, want %v", got, ts.want)
+			if got != tt.want {
+				t.Errorf("GetNFSNetNamespaceFilePath() = %v, want %v", got, tt.want)
 			}
 		})
 	}
@@ -443,17 +440,16 @@ func TestGetReadAffinityOptions(t *testing.T) {
 		t.Errorf("failed to write %s file content: %v", CsiConfigFile, err)
 	}
 	for _, tt := range tests {
-		tc := tt
-		t.Run(tc.name, func(t *testing.T) {
+		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
-			enabled, labels, err := GetCrushLocationLabels(tmpConfPath, tc.clusterID)
+			enabled, labels, err := GetCrushLocationLabels(tmpConfPath, tt.clusterID)
 			if err != nil {
 				t.Errorf("GetCrushLocationLabels() error = %v", err)

 				return
 			}
-			if enabled != tc.want.enabled || labels != tc.want.labels {
-				t.Errorf("GetCrushLocationLabels() = {%v %v} want %v", enabled, labels, tc.want)
+			if enabled != tt.want.enabled || labels != tt.want.labels {
+				t.Errorf("GetCrushLocationLabels() = {%v %v} want %v", enabled, labels, tt.want)
 			}
 		})
 	}
@@ -518,16 +514,15 @@ func TestGetCephFSMountOptions(t *testing.T) {
 	}

 	for _, tt := range tests {
-		tc := tt
-		t.Run(tc.name, func(t *testing.T) {
+		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
-			kernelMntOptions, fuseMntOptions, err := GetCephFSMountOptions(tmpConfPath, tc.clusterID)
+			kernelMntOptions, fuseMntOptions, err := GetCephFSMountOptions(tmpConfPath, tt.clusterID)
 			if err != nil {
 				t.Errorf("GetCephFSMountOptions() error = %v", err)
 			}
-			if kernelMntOptions != tc.wantKernelMntOptions || fuseMntOptions != tc.wantFuseMntOptions {
+			if kernelMntOptions != tt.wantKernelMntOptions || fuseMntOptions != tt.wantFuseMntOptions {
 				t.Errorf("GetCephFSMountOptions() = (%v, %v), want (%v, %v)",
-					kernelMntOptions, fuseMntOptions, tc.wantKernelMntOptions, tc.wantFuseMntOptions,
+					kernelMntOptions, fuseMntOptions, tt.wantKernelMntOptions, tt.wantFuseMntOptions,
 				)
 			}
 		})
@@ -588,18 +583,17 @@ func TestGetRBDMirrorDaemonCount(t *testing.T) {
 		t.Errorf("failed to write %s file content: %v", CsiConfigFile, err)
 	}
 	for _, tt := range tests {
-		ts := tt
-		t.Run(ts.name, func(t *testing.T) {
+		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
 			var got int
-			got, err = GetRBDMirrorDaemonCount(tmpConfPath, ts.clusterID)
+			got, err = GetRBDMirrorDaemonCount(tmpConfPath, tt.clusterID)
 			if err != nil {
 				t.Errorf("GetRBDMirrorDaemonCount() error = %v", err)

 				return
 			}
-			if got != ts.want {
-				t.Errorf("GetRBDMirrorDaemonCount() = %v, want %v", got, ts.want)
+			if got != tt.want {
+				t.Errorf("GetRBDMirrorDaemonCount() = %v, want %v", got, tt.want)
 			}
 		})
 	}

View File

@@ -50,12 +50,11 @@ func TestRemoveCSIPrefixedParameters(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		ts := tt
-		t.Run(ts.name, func(t *testing.T) {
+		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
-			got := RemoveCSIPrefixedParameters(ts.param)
-			if !reflect.DeepEqual(got, ts.want) {
-				t.Errorf("RemoveCSIPrefixedParameters() = %v, want %v", got, ts.want)
+			got := RemoveCSIPrefixedParameters(tt.param)
+			if !reflect.DeepEqual(got, tt.want) {
+				t.Errorf("RemoveCSIPrefixedParameters() = %v, want %v", got, tt.want)
 			}
 		})
 	}
@@ -84,11 +83,10 @@ func TestGetOwner(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		ts := tt
-		t.Run(ts.name, func(t *testing.T) {
+		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
-			if got := GetOwner(ts.args); got != ts.want {
-				t.Errorf("GetOwner() = %v, want %v", got, ts.want)
+			if got := GetOwner(tt.args); got != tt.want {
+				t.Errorf("GetOwner() = %v, want %v", got, tt.want)
 			}
 		})
 	}

View File

@@ -59,10 +59,9 @@ func TestReadAffinity_ConstructReadAffinityMapOption(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		currentTT := tt
 		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
-			require.Contains(t, currentTT.wantAny, ConstructReadAffinityMapOption(currentTT.crushLocationmap))
+			require.Contains(t, tt.wantAny, ConstructReadAffinityMapOption(tt.crushLocationmap))
 		})
 	}
 }

View File

@@ -244,7 +244,7 @@ func TestRTRemove(t *testing.T) {
 			"ref3": reftype.Normal,
 		}

-		for i := 0; i < 2; i++ {
+		for range 2 {
 			created, err := Add(ioctx, rtName, refsToAdd)
 			require.NoError(ts, err)
 			require.True(ts, created)
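
The rewritten loop above uses Go 1.22's range-over-int: `for range n` executes the body n times, and `for i := range n` yields i from 0 to n-1, replacing the classic three-clause counter loop. A standalone sketch (illustration only, not code from this repository):

package main

import "fmt"

func main() {
	// Equivalent to: for i := 0; i < 3; i++ (requires Go 1.22+).
	for i := range 3 {
		fmt.Println("index", i) // prints 0, 1, 2
	}
	// The index can be omitted when only the repeat count matters.
	for range 2 {
		fmt.Println("repeat")
	}
}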

View File

@@ -74,11 +74,10 @@ func TestRoundOffBytes(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		ts := tt
-		t.Run(ts.name, func(t *testing.T) {
+		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
-			if got := RoundOffBytes(ts.args.bytes); got != ts.want {
-				t.Errorf("RoundOffBytes() = %v, want %v", got, ts.want)
+			if got := RoundOffBytes(tt.args.bytes); got != tt.want {
+				t.Errorf("RoundOffBytes() = %v, want %v", got, tt.want)
 			}
 		})
 	}
@@ -138,11 +137,10 @@ func TestRoundOffVolSize(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		ts := tt
-		t.Run(ts.name, func(t *testing.T) {
+		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
-			if got := RoundOffVolSize(ts.args.size); got != ts.want {
-				t.Errorf("RoundOffVolSize() = %v, want %v", got, ts.want)
+			if got := RoundOffVolSize(tt.args.size); got != tt.want {
+				t.Errorf("RoundOffVolSize() = %v, want %v", got, tt.want)
 			}
 		})
 	}
@@ -233,13 +231,11 @@ func TestMountOptionsAdd(t *testing.T) {
 	}

 	for _, moaTest := range moaTests {
-		mt := moaTest
-		moaTest := moaTest
 		t.Run(moaTest.name, func(t *testing.T) {
 			t.Parallel()
-			result := MountOptionsAdd(mt.mountOptions, mt.option...)
-			if result != mt.result {
-				t.Errorf("MountOptionsAdd(): %v, want %v", result, mt.result)
+			result := MountOptionsAdd(moaTest.mountOptions, moaTest.option...)
+			if result != moaTest.result {
+				t.Errorf("MountOptionsAdd(): %v, want %v", result, moaTest.result)
 			}
 		})
 	}
@@ -402,11 +398,10 @@ func TestRoundOffCephFSVolSize(t *testing.T) {
 		},
 	}
 	for _, tt := range tests {
-		ts := tt
-		t.Run(ts.name, func(t *testing.T) {
+		t.Run(tt.name, func(t *testing.T) {
 			t.Parallel()
-			if got := RoundOffCephFSVolSize(ts.size); got != ts.want {
-				t.Errorf("RoundOffCephFSVolSize() = %v, want %v", got, ts.want)
+			if got := RoundOffCephFSVolSize(tt.size); got != tt.want {
+				t.Errorf("RoundOffCephFSVolSize() = %v, want %v", got, tt.want)
 			}
 		})
 	}