Mirror of https://github.com/ceph/ceph-csi.git, synced 2024-11-09 16:00:22 +00:00
build: address 'copyloopvar' linter warning
golangci-lint reports these:

    The copy of the 'for' variable "kmsID" can be deleted (Go 1.22+) (copyloopvar)

Signed-off-by: Niels de Vos <ndevos@ibm.com>
This commit is contained in:
parent
9321fe03c3
commit
0e7b06e9d0
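Why the copies existed (explanatory note, not part of the commit): before Go 1.22, a "for" loop declared a single variable that was reused across iterations, so closures started inside the loop body -- such as parallel subtests -- could all observe the value of the final iteration. The conventional defense was to shadow the loop variable with a per-iteration copy ("tt := tt"). Go 1.22 changed the semantics so each iteration gets a fresh variable, which turns those copies into dead code; the copyloopvar checker in golangci-lint flags them. A minimal sketch of the pattern, using a hypothetical test table (not code from this repository):

package example

import "testing"

func TestExample(t *testing.T) {
    tests := []struct {
        name string
        in   int
        want int
    }{
        {name: "zero", in: 0, want: 0},
        {name: "one", in: 1, want: 2},
    }
    for _, tt := range tests {
        // Pre-Go 1.22 idiom: copy the loop variable so every parallel
        // subtest captures its own value rather than the shared one.
        //     tt := tt
        // With Go 1.22+ each iteration already has its own tt, so the
        // copy above is redundant and copyloopvar reports it.
        t.Run(tt.name, func(t *testing.T) {
            t.Parallel()
            if got := tt.in * 2; got != tt.want {
                t.Errorf("doubled %d: got %d, want %d", tt.in, got, tt.want)
            }
        })
    }
}

The diff below applies that cleanup throughout the tree: redundant copies such as "kmsID := kmsID" and "tt := tt" are deleted, and where the copy had been given a different name ("ts", "tc", "newtt", "currentTT", "mt"), its uses are renamed back to the original loop variable.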
@@ -514,8 +514,6 @@ var _ = Describe(cephfsType, func() {
         }

         for kmsID, kmsConf := range kmsToTest {
-            kmsID := kmsID
-            kmsConf := kmsConf
             By("create a storageclass with pool and an encrypted PVC then bind it to an app with "+kmsID, func() {
                 scOpts := map[string]string{
                     "encrypted": "true",
@@ -1649,7 +1647,6 @@ var _ = Describe(cephfsType, func() {

         if testCephFSFscrypt {
             for _, kmsID := range []string{"secrets-metadata-test", "vault-test"} {
-                kmsID := kmsID
                 By("checking encrypted snapshot-backed volume with KMS "+kmsID, func() {
                     err := deleteResource(cephFSExamplePath + "storageclass.yaml")
                     if err != nil {
@@ -2234,8 +2231,6 @@ var _ = Describe(cephfsType, func() {
             "vault-test": vaultKMS,
         }
         for kmsID, kmsConf := range kmsToTest {
-            kmsID := kmsID
-            kmsConf := kmsConf
             By("create an encrypted PVC-PVC clone and bind it to an app with "+kmsID, func() {
                 err := deleteResource(cephFSExamplePath + "storageclass.yaml")
                 if err != nil {
@@ -75,7 +75,6 @@ Error from server (AlreadyExists): error when creating "STDIN": deployments.apps
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             if got := getStdErr(tt.errString); got != tt.expected {
@@ -106,13 +106,12 @@ func TestControllerServer_validateCreateVolumeGroupSnapshotRequest(t *testing.T)
         },
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            err := cs.validateCreateVolumeGroupSnapshotRequest(ts.args.ctx, ts.args.req)
-            if ts.wantErr {
+            err := cs.validateCreateVolumeGroupSnapshotRequest(tt.args.ctx, tt.args.req)
+            if tt.wantErr {
                 c := status.Code(err)
-                if c != ts.code {
+                if c != tt.code {
                     t.Errorf("ControllerServer.validateVolumeGroupSnapshotRequest() error = %v, want code %v", err, c)
                 }
             }
@@ -143,28 +143,27 @@ func Test_setMountOptions(t *testing.T) {
     }

     for _, tt := range tests {
-        tc := tt
-        t.Run(tc.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()

             driver := &csicommon.CSIDriver{}
-            tc.ns.DefaultNodeServer = csicommon.NewDefaultNodeServer(
+            tt.ns.DefaultNodeServer = csicommon.NewDefaultNodeServer(
                 driver, "cephfs", "", map[string]string{}, map[string]string{},
             )

-            err := tc.ns.setMountOptions(tc.mnt, tc.volOptions, volCap, tmpConfPath)
+            err := tt.ns.setMountOptions(tt.mnt, tt.volOptions, volCap, tmpConfPath)
             if err != nil {
                 t.Errorf("setMountOptions() = %v", err)
             }

-            switch tc.mnt.(type) {
+            switch tt.mnt.(type) {
             case *mounter.FuseMounter:
-                if !strings.Contains(tc.volOptions.FuseMountOptions, tc.want) {
-                    t.Errorf("Set FuseMountOptions = %v Required FuseMountOptions = %v", tc.volOptions.FuseMountOptions, tc.want)
+                if !strings.Contains(tt.volOptions.FuseMountOptions, tt.want) {
+                    t.Errorf("Set FuseMountOptions = %v Required FuseMountOptions = %v", tt.volOptions.FuseMountOptions, tt.want)
                 }
             case mounter.KernelMounter:
-                if !strings.Contains(tc.volOptions.KernelMountOptions, tc.want) {
-                    t.Errorf("Set KernelMountOptions = %v Required KernelMountOptions = %v", tc.volOptions.KernelMountOptions, tc.want)
+                if !strings.Contains(tt.volOptions.KernelMountOptions, tt.want) {
+                    t.Errorf("Set KernelMountOptions = %v Required KernelMountOptions = %v", tt.volOptions.KernelMountOptions, tt.want)
                 }
             }
         })
@@ -86,12 +86,11 @@ func TestIsVolumeCreateRO(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            wantErr := IsVolumeCreateRO(newtt.caps)
-            if wantErr != newtt.isRO {
-                t.Errorf("isVolumeCreateRO() wantErr = %v, isRO %v", wantErr, newtt.isRO)
+            wantErr := IsVolumeCreateRO(tt.caps)
+            if wantErr != tt.isRO {
+                t.Errorf("isVolumeCreateRO() wantErr = %v, isRO %v", wantErr, tt.isRO)
             }
         })
     }
@@ -209,13 +208,12 @@ func TestIsShallowVolumeSupported(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
-            t.Log(newtt.args.req.GetVolumeContentSource().GetSnapshot())
-            t.Log(IsVolumeCreateRO(newtt.args.req.GetVolumeCapabilities()))
+        t.Run(tt.name, func(t *testing.T) {
+            t.Log(tt.args.req.GetVolumeContentSource().GetSnapshot())
+            t.Log(IsVolumeCreateRO(tt.args.req.GetVolumeCapabilities()))
             t.Parallel()
-            if got := IsShallowVolumeSupported(newtt.args.req); got != newtt.want {
-                t.Errorf("IsShallowVolumeSupported() = %v, want %v", got, newtt.want)
+            if got := IsShallowVolumeSupported(tt.args.req); got != tt.want {
+                t.Errorf("IsShallowVolumeSupported() = %v, want %v", got, tt.want)
             }
         })
     }
@@ -43,14 +43,13 @@ func TestGetIPRange(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.cidr, func(t *testing.T) {
+        t.Run(tt.cidr, func(t *testing.T) {
             t.Parallel()
-            got, err := getIPRange(ts.cidr)
+            got, err := getIPRange(tt.cidr)
             require.NoError(t, err)

             // validate if number of IPs in the range is same as expected, if not, fail.
-            require.ElementsMatch(t, ts.expectedIPs, got)
+            require.ElementsMatch(t, tt.expectedIPs, got)
         })
     }
 }
@@ -86,20 +85,18 @@ func TestFetchIP(t *testing.T) {
     }

     for _, tt := range tests {
-        ts := tt
-
-        t.Run(ts.clientInfo, func(t *testing.T) {
+        t.Run(tt.clientInfo, func(t *testing.T) {
             t.Parallel()

-            client := activeClient{Inst: ts.clientInfo}
+            client := activeClient{Inst: tt.clientInfo}
             ip, actualErr := client.fetchIP()

-            if (actualErr != nil) != ts.expectedErr {
-                t.Errorf("expected error %v but got %v", ts.expectedErr, actualErr)
+            if (actualErr != nil) != tt.expectedErr {
+                t.Errorf("expected error %v but got %v", tt.expectedErr, actualErr)
             }

-            if ip != ts.expectedIP {
-                t.Errorf("expected IP %s but got %s", ts.expectedIP, ip)
+            if ip != tt.expectedIP {
+                t.Errorf("expected IP %s but got %s", tt.expectedIP, ip)
             }
         })
     }
@@ -126,18 +123,17 @@ func TestFetchID(t *testing.T) {
     }

     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.clientInfo, func(t *testing.T) {
+        t.Run(tt.clientInfo, func(t *testing.T) {
             t.Parallel()
-            ac := &activeClient{Inst: ts.clientInfo}
+            ac := &activeClient{Inst: tt.clientInfo}
             actualID, actualErr := ac.fetchID()

-            if (actualErr != nil) != ts.expectedErr {
-                t.Errorf("expected error %v but got %v", ts.expectedErr, actualErr)
+            if (actualErr != nil) != tt.expectedErr {
+                t.Errorf("expected error %v but got %v", tt.expectedErr, actualErr)
             }

-            if actualID != ts.expectedID {
-                t.Errorf("expected ID %d but got %d", ts.expectedID, actualID)
+            if actualID != tt.expectedID {
+                t.Errorf("expected ID %d but got %d", tt.expectedID, actualID)
             }
         })
     }
@@ -71,7 +71,6 @@ func TestValidateSchedulingInterval(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             err := validateSchedulingInterval(tt.interval)
@@ -147,7 +146,6 @@ func TestValidateSchedulingDetails(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             err := validateSchedulingDetails(ctx, tt.parameters)
@@ -203,7 +201,6 @@ func TestGetSchedulingDetails(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             interval, startTime := getSchedulingDetails(tt.parameters)
@@ -251,11 +248,10 @@ func TestCheckVolumeResyncStatus(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            if err := checkVolumeResyncStatus(ctx, ts.args); (err != nil) != ts.wantErr {
-                t.Errorf("checkVolumeResyncStatus() error = %v, expect error = %v", err, ts.wantErr)
+            if err := checkVolumeResyncStatus(ctx, tt.args); (err != nil) != tt.wantErr {
+                t.Errorf("checkVolumeResyncStatus() error = %v, expect error = %v", err, tt.wantErr)
             }
         })
     }
@@ -388,11 +384,10 @@ func TestCheckRemoteSiteStatus(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            if ready := checkRemoteSiteStatus(context.TODO(), &ts.args); ready != ts.wantReady {
-                t.Errorf("checkRemoteSiteStatus() ready = %v, expect ready = %v", ready, ts.wantReady)
+            if ready := checkRemoteSiteStatus(context.TODO(), &tt.args); ready != tt.wantReady {
+                t.Errorf("checkRemoteSiteStatus() ready = %v, expect ready = %v", ready, tt.wantReady)
             }
         })
     }
@@ -501,7 +496,6 @@ func TestValidateLastSyncInfo(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             teststruct, err := getLastSyncInfo(ctx, tt.description)
@@ -600,7 +594,6 @@ func TestGetGRPCError(t *testing.T) {
     }

     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             result := getGRPCError(tt.err)
@@ -656,7 +649,6 @@ func Test_timestampFromString(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             got, err := timestampFromString(tt.timestamp)
@@ -299,12 +299,11 @@ func TestIsFileRWO(t *testing.T) {
     }

     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            rwoFile := IsFileRWO(newtt.caps)
-            if rwoFile != newtt.rwoFile {
-                t.Errorf("IsFileRWO() rwofile = %v, want %v", rwoFile, newtt.rwoFile)
+            rwoFile := IsFileRWO(tt.caps)
+            if rwoFile != tt.rwoFile {
+                t.Errorf("IsFileRWO() rwofile = %v, want %v", rwoFile, tt.rwoFile)
             }
         })
     }
@@ -482,15 +481,14 @@ func TestIsBlockMultiWriter(t *testing.T) {
     }

     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            multiWriter, block := IsBlockMultiWriter(newtt.caps)
-            if multiWriter != newtt.multiWriter {
-                t.Errorf("IsBlockMultiWriter() multiWriter = %v, want %v", multiWriter, newtt.multiWriter)
+            multiWriter, block := IsBlockMultiWriter(tt.caps)
+            if multiWriter != tt.multiWriter {
+                t.Errorf("IsBlockMultiWriter() multiWriter = %v, want %v", multiWriter, tt.multiWriter)
             }
-            if block != newtt.block {
-                t.Errorf("IsBlockMultiWriter block = %v, want %v", block, newtt.block)
+            if block != tt.block {
+                t.Errorf("IsBlockMultiWriter block = %v, want %v", block, tt.block)
             }
         })
     }
@@ -615,12 +613,11 @@ func TestIsReaderOnly(t *testing.T) {
     }

     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            roOnly := IsReaderOnly(newtt.caps)
-            if roOnly != newtt.roOnly {
-                t.Errorf("isReadOnly() roOnly = %v, want %v", roOnly, newtt.roOnly)
+            roOnly := IsReaderOnly(tt.caps)
+            if roOnly != tt.roOnly {
+                t.Errorf("isReadOnly() roOnly = %v, want %v", roOnly, tt.roOnly)
             }
         })
     }
@@ -73,15 +73,14 @@ func TestSetConfigInt(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        currentTT := tt
-        t.Run(currentTT.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            err := setConfigInt(currentTT.args.option, currentTT.args.config, currentTT.args.key)
-            if !errors.Is(err, currentTT.err) {
-                t.Errorf("setConfigInt() error = %v, wantErr %v", err, currentTT.err)
+            err := setConfigInt(tt.args.option, tt.args.config, tt.args.key)
+            if !errors.Is(err, tt.err) {
+                t.Errorf("setConfigInt() error = %v, wantErr %v", err, tt.err)
             }
             if err != nil {
-                require.NotEqual(t, currentTT.value, currentTT.args.option)
+                require.NotEqual(t, tt.value, tt.args.option)
             }
         })
     }
@@ -78,12 +78,11 @@ func Test_validateNodePublishVolumeRequest(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        currentTT := tt
-        t.Run(currentTT.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            err := validateNodePublishVolumeRequest(currentTT.args.req)
-            if (err != nil) != currentTT.wantErr {
-                t.Errorf("validateNodePublishVoluemRequest() error = %v, wantErr %v", err, currentTT.wantErr)
+            err := validateNodePublishVolumeRequest(tt.args.req)
+            if (err != nil) != tt.wantErr {
+                t.Errorf("validateNodePublishVoluemRequest() error = %v, wantErr %v", err, tt.wantErr)
             }
         })
     }
@@ -157,17 +156,16 @@ func Test_getSource(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        currentTT := tt
-        t.Run(currentTT.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            got, err := getSource(currentTT.args.volContext)
-            if (err != nil) != currentTT.wantErr {
-                t.Errorf("getSource() error = %v, wantErr %v", err, currentTT.wantErr)
+            got, err := getSource(tt.args.volContext)
+            if (err != nil) != tt.wantErr {
+                t.Errorf("getSource() error = %v, wantErr %v", err, tt.wantErr)

                 return
             }
-            if got != currentTT.want {
-                t.Errorf("getSource() = %v, want %v", got, currentTT.want)
+            if got != tt.want {
+                t.Errorf("getSource() = %v, want %v", got, tt.want)
             }
         })
     }
@@ -77,11 +77,10 @@ func TestValidateStriping(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            if err := validateStriping(ts.parameters); (err != nil) != ts.wantErr {
-                t.Errorf("validateStriping() error = %v, wantErr %v", err, ts.wantErr)
+            if err := validateStriping(tt.parameters); (err != nil) != tt.wantErr {
+                t.Errorf("validateStriping() error = %v, wantErr %v", err, tt.wantErr)
             }
         })
     }
@@ -76,23 +76,22 @@ func TestParseEncryptionOpts(t *testing.T) {
     }

     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.testName, func(t *testing.T) {
+        t.Run(tt.testName, func(t *testing.T) {
             t.Parallel()
             actualKMS, actualEnc, actualErr := ParseEncryptionOpts(
-                newtt.volOptions,
-                newtt.fallbackType,
+                tt.volOptions,
+                tt.fallbackType,
             )
-            if actualKMS != newtt.expectedKMS {
-                t.Errorf("Expected KMS ID: %s, but got: %s", newtt.expectedKMS, actualKMS)
+            if actualKMS != tt.expectedKMS {
+                t.Errorf("Expected KMS ID: %s, but got: %s", tt.expectedKMS, actualKMS)
             }

-            if actualEnc != newtt.expectedEnc {
-                t.Errorf("Expected Encryption Type: %v, but got: %v", newtt.expectedEnc, actualEnc)
+            if actualEnc != tt.expectedEnc {
+                t.Errorf("Expected Encryption Type: %v, but got: %v", tt.expectedEnc, actualEnc)
             }

-            if (actualErr != nil) != newtt.expectedErr {
-                t.Errorf("expected error %v but got %v", newtt.expectedErr, actualErr)
+            if (actualErr != nil) != tt.expectedErr {
+                t.Errorf("expected error %v but got %v", tt.expectedErr, actualErr)
             }
         })
     }
@@ -54,12 +54,11 @@ func TestIsMigrationVolID(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            got := isMigrationVolID(newtt.args)
-            if got != newtt.migVolID {
-                t.Errorf("isMigrationVolID() = %v, want %v", got, newtt.migVolID)
+            got := isMigrationVolID(tt.args)
+            if got != tt.migVolID {
+                t.Errorf("isMigrationVolID() = %v, want %v", got, tt.migVolID)
             }
         })
     }
@@ -156,17 +155,16 @@ func TestParseMigrationVolID(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            got, err := parseMigrationVolID(newtt.args)
-            if (err != nil) != newtt.wantErr {
-                t.Errorf("ParseMigrationVolID() error = %v, wantErr %v", err, newtt.wantErr)
+            got, err := parseMigrationVolID(tt.args)
+            if (err != nil) != tt.wantErr {
+                t.Errorf("ParseMigrationVolID() error = %v, wantErr %v", err, tt.wantErr)

                 return
             }
-            if !reflect.DeepEqual(got, newtt.want) {
-                t.Errorf("ParseMigrationVolID() got = %v, want %v", got, newtt.want)
+            if !reflect.DeepEqual(got, tt.want) {
+                t.Errorf("ParseMigrationVolID() got = %v, want %v", got, tt.want)
             }
         })
     }
@@ -104,11 +104,10 @@ func TestParseBoolOption(t *testing.T) {
     }

     for _, tt := range tests {
-        tc := tt
-        val := parseBoolOption(ctx, tc.scParameters, optionName, defaultValue)
-        if val != tc.expect {
+        val := parseBoolOption(ctx, tt.scParameters, optionName, defaultValue)
+        if val != tt.expect {
             t.Errorf("parseBoolOption(%v) returned: %t, expected: %t",
-                tc.scParameters, val, tc.expect)
+                tt.scParameters, val, tt.expect)
         }
     }
 }
@@ -188,15 +187,14 @@ func TestNodeServer_appendReadAffinityMapOptions(t *testing.T) {
         },
     }
    for _, tt := range tests {
-        currentTT := tt
-        t.Run(currentTT.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             rv := &rbdVolume{
-                MapOptions: currentTT.args.mapOptions,
-                Mounter:    currentTT.args.mounter,
+                MapOptions: tt.args.mapOptions,
+                Mounter:    tt.args.mounter,
             }
-            rv.appendReadAffinityMapOptions(currentTT.args.readAffinityMapOptions)
-            require.Equal(t, currentTT.want, rv.MapOptions)
+            rv.appendReadAffinityMapOptions(tt.args.readAffinityMapOptions)
+            require.Equal(t, tt.want, rv.MapOptions)
         })
     }
 }
@@ -294,10 +292,9 @@ func TestReadAffinity_GetReadAffinityMapOptions(t *testing.T) {
     }

     for _, tt := range tests {
-        tc := tt
-        t.Run(tc.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            crushLocationMap := util.GetCrushLocationMap(tc.CLICrushLocationLabels, nodeLabels)
+            crushLocationMap := util.GetCrushLocationMap(tt.CLICrushLocationLabels, nodeLabels)
             cliReadAffinityMapOptions := util.ConstructReadAffinityMapOption(crushLocationMap)
             driver := &csicommon.CSIDriver{}

@@ -307,13 +304,13 @@ func TestReadAffinity_GetReadAffinityMapOptions(t *testing.T) {
                 ),
             }
             readAffinityMapOptions, err := util.GetReadAffinityMapOptions(
-                tmpConfPath, tc.clusterID, ns.CLIReadAffinityOptions, nodeLabels,
+                tmpConfPath, tt.clusterID, ns.CLIReadAffinityOptions, nodeLabels,
             )
             if err != nil {
                 require.Fail(t, err.Error())
             }

-            require.Equal(t, tc.want, readAffinityMapOptions)
+            require.Equal(t, tt.want, readAffinityMapOptions)
         })
     }
 }
@@ -82,24 +82,23 @@ func TestParseMapOptions(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tc := tt
-        t.Run(tc.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            krbdOpts, nbdOpts, err := parseMapOptions(tc.mapOption)
-            if err != nil && !strings.Contains(err.Error(), tc.expectErr) {
+            krbdOpts, nbdOpts, err := parseMapOptions(tt.mapOption)
+            if err != nil && !strings.Contains(err.Error(), tt.expectErr) {
                 // returned error
                 t.Errorf("parseMapOptions(%s) returned error, expected: %v, got: %v",
-                    tc.mapOption, tc.expectErr, err)
+                    tt.mapOption, tt.expectErr, err)
             }
-            if krbdOpts != tc.expectKrbdOptions {
+            if krbdOpts != tt.expectKrbdOptions {
                 // unexpected krbd option error
                 t.Errorf("parseMapOptions(%s) returned unexpected krbd options, expected :%q, got: %q",
-                    tc.mapOption, tc.expectKrbdOptions, krbdOpts)
+                    tt.mapOption, tt.expectKrbdOptions, krbdOpts)
             }
-            if nbdOpts != tc.expectNbdOptions {
+            if nbdOpts != tt.expectNbdOptions {
                 // unexpected nbd option error
                 t.Errorf("parseMapOptions(%s) returned unexpected nbd options, expected: %q, got: %q",
-                    tc.mapOption, tc.expectNbdOptions, nbdOpts)
+                    tt.mapOption, tt.expectNbdOptions, nbdOpts)
             }
         })
     }
@@ -233,7 +233,6 @@ func TestGetCephClientLogFileName(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             val := getCephClientLogFileName(tt.args.id, tt.args.logDir, tt.args.prefix)
@@ -289,7 +288,6 @@ func TestStrategicActionOnLogFile(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             strategicActionOnLogFile(ctx, tt.args.logStrategy, tt.args.logFile)
@@ -337,8 +335,7 @@ func TestIsKrbdFeatureSupported(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tc := tt
-        t.Run(tc.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             var err error
             krbdSupportedFeaturesAttr := "0x1"
@@ -349,12 +346,12 @@ func TestIsKrbdFeatureSupported(t *testing.T) {
             // In case /sys/bus/rbd/supported_features is absent and we are
             // not in a position to prepare krbd feature attributes,
             // isKrbdFeatureSupported returns error ErrNotExist
-            supported, err := isKrbdFeatureSupported(ctx, tc.featureName)
+            supported, err := isKrbdFeatureSupported(ctx, tt.featureName)
             if err != nil && !errors.Is(err, os.ErrNotExist) {
-                t.Errorf("isKrbdFeatureSupported(%s) returned error: %v", tc.featureName, err)
-            } else if supported != tc.isSupported {
+                t.Errorf("isKrbdFeatureSupported(%s) returned error: %v", tt.featureName, err)
+            } else if supported != tt.isSupported {
                 t.Errorf("isKrbdFeatureSupported(%s) returned supported status, expected: %t, got: %t",
-                    tc.featureName, tc.isSupported, supported)
+                    tt.featureName, tt.isSupported, supported)
             }
         })
     }
@@ -382,11 +379,10 @@ func Test_checkValidImageFeatures(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tc := tt
-        t.Run(tc.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            if got := checkValidImageFeatures(tc.imageFeatures, tc.ok); got != tc.want {
-                t.Errorf("checkValidImageFeatures() = %v, want %v", got, tc.want)
+            if got := checkValidImageFeatures(tt.imageFeatures, tt.ok); got != tt.want {
+                t.Errorf("checkValidImageFeatures() = %v, want %v", got, tt.want)
             }
         })
     }
@@ -64,25 +64,24 @@ func TestExecCommandWithTimeout(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            stdout, _, err := ExecCommandWithTimeout(newtt.args.ctx,
-                newtt.args.timeout,
-                newtt.args.program,
-                newtt.args.args...)
-            if (err != nil) != newtt.wantErr {
-                t.Errorf("ExecCommandWithTimeout() error = %v, wantErr %v", err, newtt.wantErr)
+            stdout, _, err := ExecCommandWithTimeout(tt.args.ctx,
+                tt.args.timeout,
+                tt.args.program,
+                tt.args.args...)
+            if (err != nil) != tt.wantErr {
+                t.Errorf("ExecCommandWithTimeout() error = %v, wantErr %v", err, tt.wantErr)

                 return
             }

-            if newtt.wantErr && !errors.Is(err, newtt.expectedErr) {
-                t.Errorf("ExecCommandWithTimeout() error expected got = %v, want %v", err, newtt.expectedErr)
+            if tt.wantErr && !errors.Is(err, tt.expectedErr) {
+                t.Errorf("ExecCommandWithTimeout() error expected got = %v, want %v", err, tt.expectedErr)
             }

-            if stdout != newtt.stdout {
-                t.Errorf("ExecCommandWithTimeout() got = %v, want %v", stdout, newtt.stdout)
+            if stdout != tt.stdout {
+                t.Errorf("ExecCommandWithTimeout() got = %v, want %v", stdout, tt.stdout)
             }
         })
     }
@@ -138,23 +138,21 @@ func TestGetClusterMappingInfo(t *testing.T) {
         },
     }
     for i, tt := range tests {
-        currentI := i
-        currentTT := tt
-        t.Run(currentTT.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            mappingConfigFile := fmt.Sprintf("%s/mapping-%d.json", mappingBasePath, currentI)
-            if len(currentTT.mappingFilecontent) != 0 {
-                err = os.WriteFile(mappingConfigFile, currentTT.mappingFilecontent, 0o600)
+            mappingConfigFile := fmt.Sprintf("%s/mapping-%d.json", mappingBasePath, i)
+            if len(tt.mappingFilecontent) != 0 {
+                err = os.WriteFile(mappingConfigFile, tt.mappingFilecontent, 0o600)
                 if err != nil {
                     t.Errorf("failed to write to %q, error = %v", mappingConfigFile, err)
                 }
             }
-            data, mErr := getClusterMappingInfo(currentTT.clusterID, mappingConfigFile)
-            if (mErr != nil) != currentTT.expectErr {
-                t.Errorf("getClusterMappingInfo() error = %v, expected Error %v", mErr, currentTT.expectErr)
+            data, mErr := getClusterMappingInfo(tt.clusterID, mappingConfigFile)
+            if (mErr != nil) != tt.expectErr {
+                t.Errorf("getClusterMappingInfo() error = %v, expected Error %v", mErr, tt.expectErr)
             }
-            if !reflect.DeepEqual(data, currentTT.expectedData) {
-                t.Errorf("getClusterMappingInfo() = %v, expected data %v", data, currentTT.expectedData)
+            if !reflect.DeepEqual(data, tt.expectedData) {
+                t.Errorf("getClusterMappingInfo() = %v, expected data %v", data, tt.expectedData)
             }
         })
     }
@@ -285,7 +283,6 @@ func TestGetMappedID(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             val := GetMappedID(tt.args.key, tt.args.value, tt.args.id)
@@ -407,7 +404,6 @@ func TestFetchMappedClusterIDAndMons(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        tt := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             got, got1, err := fetchMappedClusterIDAndMons(ctx, tt.args.clusterID, clusterMappingConfigFile, csiConfigFile)
@@ -39,11 +39,10 @@ func TestIsMigrationSecret(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            if got := isMigrationSecret(newtt.vc); got != newtt.want {
-                t.Errorf("isMigrationSecret() = %v, want %v", got, newtt.want)
+            if got := isMigrationSecret(tt.vc); got != tt.want {
+                t.Errorf("isMigrationSecret() = %v, want %v", got, tt.want)
             }
         })
     }
@@ -83,17 +82,16 @@ func TestParseAndSetSecretMapFromMigSecret(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        newtt := tt
-        t.Run(newtt.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            got, err := ParseAndSetSecretMapFromMigSecret(newtt.secretmap)
-            if (err != nil) != newtt.wantErr {
-                t.Errorf("ParseAndSetSecretMapFromMigSecret() error = %v, wantErr %v", err, newtt.wantErr)
+            got, err := ParseAndSetSecretMapFromMigSecret(tt.secretmap)
+            if (err != nil) != tt.wantErr {
+                t.Errorf("ParseAndSetSecretMapFromMigSecret() error = %v, wantErr %v", err, tt.wantErr)

                 return
             }
-            if !reflect.DeepEqual(got, newtt.want) {
-                t.Errorf("ParseAndSetSecretMapFromMigSecret() got = %v, want %v", got, newtt.want)
+            if !reflect.DeepEqual(got, tt.want) {
+                t.Errorf("ParseAndSetSecretMapFromMigSecret() got = %v, want %v", got, tt.want)
             }
         })
     }
@@ -102,12 +102,11 @@ func Test_getCrushLocationMap(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        currentTT := tt
-        t.Run(currentTT.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             require.Equal(t,
-                currentTT.want,
-                getCrushLocationMap(currentTT.args.crushLocationLabels, currentTT.args.nodeLabels))
+                tt.want,
+                getCrushLocationMap(tt.args.crushLocationLabels, tt.args.nodeLabels))
         })
     }
 }
@@ -199,17 +199,16 @@ func TestGetRBDNetNamespaceFilePath(t *testing.T) {
         t.Errorf("failed to write %s file content: %v", CsiConfigFile, err)
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            got, err := GetRBDNetNamespaceFilePath(tmpConfPath, ts.clusterID)
+            got, err := GetRBDNetNamespaceFilePath(tmpConfPath, tt.clusterID)
             if err != nil {
                 t.Errorf("GetRBDNetNamespaceFilePath() error = %v", err)

                 return
             }
-            if got != ts.want {
-                t.Errorf("GetRBDNetNamespaceFilePath() = %v, want %v", got, ts.want)
+            if got != tt.want {
+                t.Errorf("GetRBDNetNamespaceFilePath() = %v, want %v", got, tt.want)
             }
         })
     }
@@ -269,17 +268,16 @@ func TestGetCephFSNetNamespaceFilePath(t *testing.T) {
         t.Errorf("failed to write %s file content: %v", CsiConfigFile, err)
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            got, err := GetCephFSNetNamespaceFilePath(tmpConfPath, ts.clusterID)
+            got, err := GetCephFSNetNamespaceFilePath(tmpConfPath, tt.clusterID)
             if err != nil {
                 t.Errorf("GetCephFSNetNamespaceFilePath() error = %v", err)

                 return
             }
-            if got != ts.want {
-                t.Errorf("GetCephFSNetNamespaceFilePath() = %v, want %v", got, ts.want)
+            if got != tt.want {
+                t.Errorf("GetCephFSNetNamespaceFilePath() = %v, want %v", got, tt.want)
             }
         })
     }
@@ -339,17 +337,16 @@ func TestGetNFSNetNamespaceFilePath(t *testing.T) {
         t.Errorf("failed to write %s file content: %v", CsiConfigFile, err)
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            got, err := GetNFSNetNamespaceFilePath(tmpConfPath, ts.clusterID)
+            got, err := GetNFSNetNamespaceFilePath(tmpConfPath, tt.clusterID)
             if err != nil {
                 t.Errorf("GetNFSNetNamespaceFilePath() error = %v", err)

                 return
             }
-            if got != ts.want {
-                t.Errorf("GetNFSNetNamespaceFilePath() = %v, want %v", got, ts.want)
+            if got != tt.want {
+                t.Errorf("GetNFSNetNamespaceFilePath() = %v, want %v", got, tt.want)
             }
         })
     }
@@ -443,17 +440,16 @@ func TestGetReadAffinityOptions(t *testing.T) {
         t.Errorf("failed to write %s file content: %v", CsiConfigFile, err)
     }
     for _, tt := range tests {
-        tc := tt
-        t.Run(tc.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            enabled, labels, err := GetCrushLocationLabels(tmpConfPath, tc.clusterID)
+            enabled, labels, err := GetCrushLocationLabels(tmpConfPath, tt.clusterID)
             if err != nil {
                 t.Errorf("GetCrushLocationLabels() error = %v", err)

                 return
             }
-            if enabled != tc.want.enabled || labels != tc.want.labels {
-                t.Errorf("GetCrushLocationLabels() = {%v %v} want %v", enabled, labels, tc.want)
+            if enabled != tt.want.enabled || labels != tt.want.labels {
+                t.Errorf("GetCrushLocationLabels() = {%v %v} want %v", enabled, labels, tt.want)
             }
         })
     }
@@ -518,16 +514,15 @@ func TestGetCephFSMountOptions(t *testing.T) {
     }

     for _, tt := range tests {
-        tc := tt
-        t.Run(tc.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            kernelMntOptions, fuseMntOptions, err := GetCephFSMountOptions(tmpConfPath, tc.clusterID)
+            kernelMntOptions, fuseMntOptions, err := GetCephFSMountOptions(tmpConfPath, tt.clusterID)
             if err != nil {
                 t.Errorf("GetCephFSMountOptions() error = %v", err)
             }
-            if kernelMntOptions != tc.wantKernelMntOptions || fuseMntOptions != tc.wantFuseMntOptions {
+            if kernelMntOptions != tt.wantKernelMntOptions || fuseMntOptions != tt.wantFuseMntOptions {
                 t.Errorf("GetCephFSMountOptions() = (%v, %v), want (%v, %v)",
-                    kernelMntOptions, fuseMntOptions, tc.wantKernelMntOptions, tc.wantFuseMntOptions,
+                    kernelMntOptions, fuseMntOptions, tt.wantKernelMntOptions, tt.wantFuseMntOptions,
                 )
             }
         })
@@ -588,18 +583,17 @@ func TestGetRBDMirrorDaemonCount(t *testing.T) {
         t.Errorf("failed to write %s file content: %v", CsiConfigFile, err)
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
             var got int
-            got, err = GetRBDMirrorDaemonCount(tmpConfPath, ts.clusterID)
+            got, err = GetRBDMirrorDaemonCount(tmpConfPath, tt.clusterID)
             if err != nil {
                 t.Errorf("GetRBDMirrorDaemonCount() error = %v", err)

                 return
             }
-            if got != ts.want {
-                t.Errorf("GetRBDMirrorDaemonCount() = %v, want %v", got, ts.want)
+            if got != tt.want {
+                t.Errorf("GetRBDMirrorDaemonCount() = %v, want %v", got, tt.want)
             }
         })
     }
@@ -50,12 +50,11 @@ func TestRemoveCSIPrefixedParameters(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            got := RemoveCSIPrefixedParameters(ts.param)
-            if !reflect.DeepEqual(got, ts.want) {
-                t.Errorf("RemoveCSIPrefixedParameters() = %v, want %v", got, ts.want)
+            got := RemoveCSIPrefixedParameters(tt.param)
+            if !reflect.DeepEqual(got, tt.want) {
+                t.Errorf("RemoveCSIPrefixedParameters() = %v, want %v", got, tt.want)
             }
         })
     }
@@ -84,11 +83,10 @@ func TestGetOwner(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            if got := GetOwner(ts.args); got != ts.want {
-                t.Errorf("GetOwner() = %v, want %v", got, ts.want)
+            if got := GetOwner(tt.args); got != tt.want {
+                t.Errorf("GetOwner() = %v, want %v", got, tt.want)
             }
         })
     }
@@ -59,10 +59,9 @@ func TestReadAffinity_ConstructReadAffinityMapOption(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        currentTT := tt
         t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            require.Contains(t, currentTT.wantAny, ConstructReadAffinityMapOption(currentTT.crushLocationmap))
+            require.Contains(t, tt.wantAny, ConstructReadAffinityMapOption(tt.crushLocationmap))
         })
     }
 }
@@ -74,11 +74,10 @@ func TestRoundOffBytes(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            if got := RoundOffBytes(ts.args.bytes); got != ts.want {
-                t.Errorf("RoundOffBytes() = %v, want %v", got, ts.want)
+            if got := RoundOffBytes(tt.args.bytes); got != tt.want {
+                t.Errorf("RoundOffBytes() = %v, want %v", got, tt.want)
             }
         })
     }
@@ -138,11 +137,10 @@ func TestRoundOffVolSize(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            if got := RoundOffVolSize(ts.args.size); got != ts.want {
-                t.Errorf("RoundOffVolSize() = %v, want %v", got, ts.want)
+            if got := RoundOffVolSize(tt.args.size); got != tt.want {
+                t.Errorf("RoundOffVolSize() = %v, want %v", got, tt.want)
             }
         })
     }
@@ -233,13 +231,11 @@ func TestMountOptionsAdd(t *testing.T) {
     }

     for _, moaTest := range moaTests {
-        mt := moaTest
-        moaTest := moaTest
         t.Run(moaTest.name, func(t *testing.T) {
             t.Parallel()
-            result := MountOptionsAdd(mt.mountOptions, mt.option...)
-            if result != mt.result {
-                t.Errorf("MountOptionsAdd(): %v, want %v", result, mt.result)
+            result := MountOptionsAdd(moaTest.mountOptions, moaTest.option...)
+            if result != moaTest.result {
+                t.Errorf("MountOptionsAdd(): %v, want %v", result, moaTest.result)
             }
         })
     }
@@ -402,11 +398,10 @@ func TestRoundOffCephFSVolSize(t *testing.T) {
         },
     }
     for _, tt := range tests {
-        ts := tt
-        t.Run(ts.name, func(t *testing.T) {
+        t.Run(tt.name, func(t *testing.T) {
             t.Parallel()
-            if got := RoundOffCephFSVolSize(ts.size); got != ts.want {
-                t.Errorf("RoundOffCephFSVolSize() = %v, want %v", got, ts.want)
+            if got := RoundOffCephFSVolSize(tt.size); got != tt.want {
+                t.Errorf("RoundOffCephFSVolSize() = %v, want %v", got, tt.want)
             }
         })
     }