Mirror of https://github.com/ceph/ceph-csi.git (synced 2024-12-25 06:20:24 +00:00)
ci: non-constant format string (govet)
Signed-off-by: Praveen M <m.praveen@ibm.com>
This commit is contained in:
parent 2526e583c0
commit c1ee4a5a06
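The change is mechanical: the go vet printf analyzer (run here via golangci-lint's govet) reports "non-constant format string" wherever a runtime string such as err.Error() is passed as the format argument of a printf-style function. The sketch below is illustration only, not code from the ceph-csi tree; it shows why the check exists and the two fix patterns this commit applies, assuming a message that happens to contain a '%' verb.

// Illustration only; not part of this commit.
package main

import (
	"errors"
	"fmt"
)

func main() {
	// An error whose message happens to contain a formatting verb.
	err := errors.New("cannot expand %s: operation not supported")

	// Flagged by go vet: the format argument is not a constant, so the
	// "%s" inside the message is parsed as a verb with no operand.
	fmt.Printf(err.Error() + "\n") // prints: cannot expand %!s(MISSING): operation not supported

	// Fix pattern 1, used below for framework.Failf, framework.Logf and
	// log.FatalLogMsg: keep the printf-style call but pass a constant format.
	fmt.Printf("%v\n", err.Error())

	// Fix pattern 2, used below for gRPC errors: when no formatting is
	// needed, call the non-formatting variant instead, for example
	// status.Error(codes.Internal, err.Error()) rather than
	// status.Errorf(codes.Internal, err.Error()).
}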
@@ -768,7 +768,7 @@ var _ = Describe(cephfsType, func() {
 for i := range deplPods {
     err = ensureStatSucceeds(deplPods[i].Name)
     if err != nil {
-        framework.Failf(err.Error())
+        framework.Failf("%v", err.Error())
     }
 }
 // Kill ceph-fuse in cephfs-csi node plugin Pods.
@@ -797,12 +797,12 @@ var _ = Describe(cephfsType, func() {
 // the pod with hopefully mounts working again.
 err = deletePod(pod2Name, depl.Namespace, c, deployTimeout)
 if err != nil {
-    framework.Failf(err.Error())
+    framework.Failf("%v", err.Error())
 }
 // Wait for the second Pod to be recreated.
 err = waitForDeploymentComplete(c, depl.Name, depl.Namespace, deployTimeout)
 if err != nil {
-    framework.Failf(err.Error())
+    framework.Failf("%v", err.Error())
 }
 // List Deployment's pods again to get name of the new pod.
 deplPods, err = listPods(f, depl.Namespace, &metav1.ListOptions{
@@ -828,7 +828,7 @@ var _ = Describe(cephfsType, func() {
 // Verify Pod pod2Name has its ceph-fuse mount working again.
 err = ensureStatSucceeds(pod2Name)
 if err != nil {
-    framework.Failf(err.Error())
+    framework.Failf("%v", err.Error())
 }

 // Delete created resources.
@@ -1083,7 +1083,7 @@ var _ = Describe(cephfsType, func() {
     &opt)
 readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath)
 if !strings.Contains(stdErr, readOnlyErr) {
-    framework.Failf(stdErr)
+    framework.Failf("%v", stdErr)
 }

 // delete PVC and app
@@ -2413,7 +2413,7 @@ var _ = Describe(cephfsType, func() {
     &opt)
 readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath)
 if !strings.Contains(stdErr, readOnlyErr) {
-    framework.Failf(stdErr)
+    framework.Failf("%v", stdErr)
 }

 // delete cloned ROX pvc and app
@@ -614,7 +614,7 @@ var _ = Describe("nfs", func() {
     &opt)
 readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath)
 if !strings.Contains(stdErr, readOnlyErr) {
-    framework.Failf(stdErr)
+    framework.Failf("%v", stdErr)
 }

 // delete PVC and app
e2e/rbd.go (10 changed lines)
@@ -1665,7 +1665,7 @@ var _ = Describe("RBD", func() {
 }
 readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath)
 if !strings.Contains(stdErr, readOnlyErr) {
-    framework.Failf(stdErr)
+    framework.Failf("%v", stdErr)
 }
 }
@@ -1798,7 +1798,7 @@ var _ = Describe("RBD", func() {
 }
 readOnlyErr := fmt.Sprintf("'%s': Operation not permitted", devPath)
 if !strings.Contains(stdErr, readOnlyErr) {
-    framework.Failf(stdErr)
+    framework.Failf("%v", stdErr)
 }
 }
 err = deletePVCAndDeploymentApp(f, pvcClone, appClone)
@@ -3357,7 +3357,7 @@ var _ = Describe("RBD", func() {
     &opt)
 readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath)
 if !strings.Contains(stdErr, readOnlyErr) {
-    framework.Failf(stdErr)
+    framework.Failf("%v", stdErr)
 }
 }
@@ -3471,7 +3471,7 @@ var _ = Describe("RBD", func() {
     &opt)
 readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath)
 if !strings.Contains(stdErr, readOnlyErr) {
-    framework.Failf(stdErr)
+    framework.Failf("%v", stdErr)
 }
 }
@@ -4133,7 +4133,7 @@ var _ = Describe("RBD", func() {
     &opt)
 readOnlyErr := fmt.Sprintf("cannot create %s: Read-only file system", filePath)
 if !strings.Contains(stdErr, readOnlyErr) {
-    framework.Failf(stdErr)
+    framework.Failf("%v", stdErr)
 }

 // delete PVC and app
@@ -1071,7 +1071,7 @@ func waitToRemoveImagesFromTrash(f *framework.Framework, poolName string, t int)
     return true, nil
 }
 errReason = fmt.Errorf("found %d images found in trash. Image details %v", len(imagesInTrash), imagesInTrash)
-framework.Logf(errReason.Error())
+framework.Logf("%v", errReason.Error())

 return false, nil
 })
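All of the e2e changes above share one shape: framework.Failf and framework.Logf take a printf-style format as their first argument, so a pre-built message is now routed through a constant "%v" format. A minimal sketch of that shape follows, with a hypothetical failOn helper and assuming the usual k8s.io/kubernetes/test/e2e/framework import that these tests already reference.

package e2e

import "k8s.io/kubernetes/test/e2e/framework"

// failOn is a hypothetical helper showing the before/after shape of the
// e2e changes above.
func failOn(err error) {
	if err != nil {
		// Before: framework.Failf(err.Error()), where err.Error() becomes
		// the format string and govet rejects it as non-constant.
		// After: constant format, message passed as data.
		framework.Failf("%v", err.Error())
	}
}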
@@ -109,7 +109,7 @@ func (fs *Driver) Run(conf *util.Config) {
 if conf.IsNodeServer && k8s.RunsOnKubernetes() {
     nodeLabels, err = k8s.GetNodeLabels(conf.NodeID)
     if err != nil {
-        log.FatalLogMsg(err.Error())
+        log.FatalLogMsg("%v", err.Error())
     }
 }

@@ -159,7 +159,7 @@ func (fs *Driver) Run(conf *util.Config) {
 if conf.IsNodeServer {
     topology, err = util.GetTopologyFromDomainLabels(conf.DomainLabels, conf.NodeID, conf.DriverName)
     if err != nil {
-        log.FatalLogMsg(err.Error())
+        log.FatalLogMsg("%v", err.Error())
     }
     fs.ns = NewNodeServer(
         fs.cd, conf.Vtype,
@@ -176,7 +176,7 @@ func (fs *Driver) Run(conf *util.Config) {
 if !conf.IsControllerServer && !conf.IsNodeServer {
     topology, err = util.GetTopologyFromDomainLabels(conf.DomainLabels, conf.NodeID, conf.DriverName)
     if err != nil {
-        log.FatalLogMsg(err.Error())
+        log.FatalLogMsg("%v", err.Error())
     }
     fs.ns = NewNodeServer(
         fs.cd, conf.Vtype,
@@ -189,7 +189,7 @@ func (fs *Driver) Run(conf *util.Config) {
 // configure CSI-Addons server and components
 err = fs.setupCSIAddonsServer(conf)
 if err != nil {
-    log.FatalLogMsg(err.Error())
+    log.FatalLogMsg("%v", err.Error())
 }

 server := csicommon.NewNonBlockingGRPCServer()
@@ -436,7 +436,7 @@ func getBackingSnapshotRoot(
 if err != nil {
     log.ErrorLog(ctx, "failed to open %s when searching for snapshot root: %v", snapshotsBase, err)

-    return "", status.Errorf(codes.Internal, err.Error())
+    return "", status.Error(codes.Internal, err.Error())
 }
 defer dir.Close()

@@ -446,7 +446,7 @@ func getBackingSnapshotRoot(
 if err != nil {
     log.ErrorLog(ctx, "failed to read %s when searching for snapshot root: %v", snapshotsBase, err)

-    return "", status.Errorf(codes.Internal, err.Error())
+    return "", status.Error(codes.Internal, err.Error())
 }

 var (
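The gRPC-facing changes, starting with the two hunks above, swap status.Errorf for status.Error rather than adding a "%v" format: the message is already fully formed, so the non-formatting constructor from google.golang.org/grpc/status is the natural fit. A small sketch with a hypothetical wrapInternal helper (not a function from this repository):

package example

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// wrapInternal is a hypothetical helper mirroring the call sites changed
// in this commit.
func wrapInternal(err error) error {
	// Before: status.Errorf(codes.Internal, err.Error()). Errorf treats its
	// second argument as a printf format, so a literal '%' in the error
	// text would be mis-parsed, and govet flags the non-constant format.
	// After: status.Error takes the message verbatim.
	return status.Error(codes.Internal, err.Error())
}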
@@ -103,7 +103,7 @@ func (r *ReconcilePersistentVolume) getCredentials(

 if name == "" || namespace == "" {
     errStr := "secret name or secret namespace is empty"
-    log.ErrorLogMsg(errStr)
+    log.ErrorLogMsg("%v", errStr)

     return nil, errors.New(errStr)
 }
@@ -73,9 +73,9 @@ func (ekrs *EncryptionKeyRotationServer) EncryptionKeyRotate(
     err = status.Errorf(codes.NotFound, "volume ID %s not found", volID)
 case errors.Is(err, util.ErrPoolNotFound):
     log.ErrorLog(ctx, "failed to get backend volume for %s: %v", volID, err)
-    err = status.Errorf(codes.NotFound, err.Error())
+    err = status.Error(codes.NotFound, err.Error())
 default:
-    err = status.Errorf(codes.Internal, err.Error())
+    err = status.Error(codes.Internal, err.Error())
 }

 return nil, err
@@ -123,7 +123,7 @@ func getForceOption(ctx context.Context, parameters map[string]string) (bool, er
 }
 force, err := strconv.ParseBool(val)
 if err != nil {
-    return false, status.Errorf(codes.Internal, err.Error())
+    return false, status.Error(codes.Internal, err.Error())
 }

 return force, nil
@@ -636,7 +636,7 @@ func (rs *ReplicationServer) ResyncVolume(ctx context.Context,
     // it takes time for this operation.
     log.ErrorLog(ctx, err.Error())

-    return nil, status.Errorf(codes.Aborted, err.Error())
+    return nil, status.Error(codes.Aborted, err.Error())
 }

 if info.GetState() != librbd.MirrorImageEnabled.String() {
@@ -832,11 +832,11 @@ func (rs *ReplicationServer) GetVolumeReplicationInfo(ctx context.Context,
 if err != nil {
     switch {
     case errors.Is(err, corerbd.ErrImageNotFound):
-        err = status.Errorf(codes.NotFound, err.Error())
+        err = status.Error(codes.NotFound, err.Error())
     case errors.Is(err, util.ErrPoolNotFound):
-        err = status.Errorf(codes.NotFound, err.Error())
+        err = status.Error(codes.NotFound, err.Error())
     default:
-        err = status.Errorf(codes.Internal, err.Error())
+        err = status.Error(codes.Internal, err.Error())
     }

     return nil, err
@@ -64,7 +64,7 @@ func recordLiveness(endpoint, drivername string, pollTime, timeout time.Duration
 // register prometheus metrics
 err := prometheus.Register(liveness)
 if err != nil {
-    log.FatalLogMsg(err.Error())
+    log.FatalLogMsg("%v", err.Error())
 }

 csiConn, err := connlib.Connect(context.Background(), endpoint, liveMetricsManager)
@@ -1122,9 +1122,9 @@ func (cs *ControllerServer) CreateSnapshot(
     err = status.Errorf(codes.NotFound, "source Volume ID %s not found", req.GetSourceVolumeId())
 case errors.Is(err, util.ErrPoolNotFound):
     log.ErrorLog(ctx, "failed to get backend volume for %s: %v", req.GetSourceVolumeId(), err)
-    err = status.Errorf(codes.NotFound, err.Error())
+    err = status.Error(codes.NotFound, err.Error())
 default:
-    err = status.Errorf(codes.Internal, err.Error())
+    err = status.Error(codes.Internal, err.Error())
 }

 return nil, err
@@ -1171,7 +1171,7 @@ func (cs *ControllerServer) CreateSnapshot(
         return nil, status.Error(codes.AlreadyExists, err.Error())
     }

-    return nil, status.Errorf(codes.Internal, err.Error())
+    return nil, status.Error(codes.Internal, err.Error())
 }
 if found {
     return cloneFromSnapshot(ctx, rbdVol, rbdSnap, cr, req.GetParameters())
@@ -1253,7 +1253,7 @@ func cloneFromSnapshot(
         log.WarningLog(ctx, "failed undoing reservation of snapshot: %s %v", rbdSnap.RequestName, uErr)
     }

-    return nil, status.Errorf(codes.Internal, err.Error())
+    return nil, status.Error(codes.Internal, err.Error())
 }
 defer vol.Destroy(ctx)

@@ -1265,14 +1265,14 @@ func cloneFromSnapshot(
 err = vol.flattenRbdImage(ctx, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
 if errors.Is(err, ErrFlattenInProgress) {
     // if flattening is in progress, return error and do not cleanup
-    return nil, status.Errorf(codes.Internal, err.Error())
+    return nil, status.Error(codes.Internal, err.Error())
 } else if err != nil {
     uErr := undoSnapshotCloning(ctx, rbdVol, rbdSnap, vol, cr)
     if uErr != nil {
         log.WarningLog(ctx, "failed undoing reservation of snapshot: %s %v", rbdSnap.RequestName, uErr)
     }

-    return nil, status.Errorf(codes.Internal, err.Error())
+    return nil, status.Error(codes.Internal, err.Error())
 }

 // Update snapshot-name/snapshot-namespace/snapshotcontent-name details on
@@ -1566,9 +1566,9 @@ func (cs *ControllerServer) ControllerExpandVolume(
     err = status.Errorf(codes.NotFound, "volume ID %s not found", volID)
 case errors.Is(err, util.ErrPoolNotFound):
     log.ErrorLog(ctx, "failed to get backend volume for %s: %v", volID, err)
-    err = status.Errorf(codes.NotFound, err.Error())
+    err = status.Error(codes.NotFound, err.Error())
 default:
-    err = status.Errorf(codes.Internal, err.Error())
+    err = status.Error(codes.Internal, err.Error())
 }

 return nil, err
@@ -143,7 +143,7 @@ func (r *Driver) Run(conf *util.Config) {
 if k8s.RunsOnKubernetes() && conf.IsNodeServer {
     nodeLabels, err = k8s.GetNodeLabels(conf.NodeID)
     if err != nil {
-        log.FatalLogMsg(err.Error())
+        log.FatalLogMsg("%v", err.Error())
     }
 }

@@ -157,19 +157,19 @@ func (r *Driver) Run(conf *util.Config) {
 if conf.IsNodeServer {
     topology, err = util.GetTopologyFromDomainLabels(conf.DomainLabels, conf.NodeID, conf.DriverName)
     if err != nil {
-        log.FatalLogMsg(err.Error())
+        log.FatalLogMsg("%v", err.Error())
     }
     r.ns = NewNodeServer(r.cd, conf.Vtype, nodeLabels, topology, crushLocationMap)

     var attr string
     attr, err = rbd.GetKrbdSupportedFeatures()
     if err != nil && !errors.Is(err, os.ErrNotExist) {
-        log.FatalLogMsg(err.Error())
+        log.FatalLogMsg("%v", err.Error())
     }
     var krbdFeatures uint
     krbdFeatures, err = rbd.HexStringToInteger(attr)
     if err != nil {
-        log.FatalLogMsg(err.Error())
+        log.FatalLogMsg("%v", err.Error())
     }
     rbd.SetGlobalInt("krbdFeatures", krbdFeatures)

@@ -185,7 +185,7 @@ func (r *Driver) Run(conf *util.Config) {
 // configure CSI-Addons server and components
 err = r.setupCSIAddonsServer(conf)
 if err != nil {
-    log.FatalLogMsg(err.Error())
+    log.FatalLogMsg("%v", err.Error())
 }

 s := csicommon.NewNonBlockingGRPCServer()
@@ -26,7 +26,7 @@ import (
 func checkError(t *testing.T, msg string, err error) {
     t.Helper()
     if err == nil {
-        t.Errorf(msg)
+        t.Error(msg)
     }
 }

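The last hunk applies the same idea to the standard testing package: with a pre-built message and no arguments, t.Error, which reports its arguments verbatim, replaces t.Errorf. A tiny sketch of that pattern; the helper name is illustrative, not the one in the repository:

package example

import "testing"

// reportIfNoError mirrors the shape of the checkError helper changed above.
func reportIfNoError(t *testing.T, msg string, err error) {
	t.Helper()
	if err == nil {
		// t.Errorf(msg) would treat msg as a format string and trips the
		// govet check; t.Error logs the message exactly as given.
		t.Error(msg)
	}
}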