Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 02:33:34 +00:00)
cleanup: resolves gofumpt issues of internal codes
This PR runs gofumpt on the internal folder.

Updates: #1586

Signed-off-by: Yati Padia <ypadia@redhat.com>
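The hunks below are mechanical style fixes with no behaviour change. As a rough sketch (hypothetical code, not taken from the ceph-csi sources), the recurring rewrites in this commit look like the following: short variable declarations instead of var x = ..., 0o-prefixed octal literals, and single-declaration var blocks unwrapped onto one line.

// Hypothetical before/after sketch of the style rewrites applied in this commit.
package main

import (
	"fmt"
	"os"
)

func main() {
	// Before: var tmpDir = "/tmp/ceph-csi-example"
	// After: a short variable declaration.
	tmpDir := "/tmp/ceph-csi-example"

	// Before: os.MkdirAll(tmpDir, 0750)
	// After: the octal literal uses the 0o prefix; the value is unchanged.
	if err := os.MkdirAll(tmpDir, 0o750); err != nil {
		fmt.Println("mkdir failed:", err)
		return
	}

	// Before:
	//	var (
	//		err error
	//	)
	// After: a single-spec var group becomes one line.
	var err error
	err = os.Remove(tmpDir)
	fmt.Println("cleanup error:", err)
}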
@@ -22,7 +22,6 @@ import (
 	"strconv"
 
 	csicommon "github.com/ceph/ceph-csi/internal/csi-common"
 	"github.com/ceph/ceph-csi/internal/journal"
 	"github.com/ceph/ceph-csi/internal/util"
-
 	librbd "github.com/ceph/go-ceph/rbd"
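The only change in this hunk is the removal of the blank line before the go-ceph import, so all third-party imports end up in a single group after the standard-library block. A layout sketch only (not compilable on its own), trimmed to the imports visible in the hunk:

package rbd

import (
	"strconv"

	csicommon "github.com/ceph/ceph-csi/internal/csi-common"
	"github.com/ceph/ceph-csi/internal/journal"
	"github.com/ceph/ceph-csi/internal/util"
	librbd "github.com/ceph/go-ceph/rbd"
)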
@@ -600,8 +599,7 @@ func (cs *ControllerServer) createBackingImage(
 	rbdSnap *rbdSnapshot) error {
 	var err error
 
-	var j = &journal.Connection{}
-	j, err = volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)
+	j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)
 	if err != nil {
 		return status.Error(codes.Internal, err.Error())
 	}
@@ -747,8 +745,7 @@ func (cs *ControllerServer) DeleteVolume(
 	}
 	defer cs.OperationLocks.ReleaseDeleteLock(volumeID)
 
-	var rbdVol = &rbdVolume{}
-	rbdVol, err = genVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
+	rbdVol, err := genVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
 	defer rbdVol.Destroy()
 	if err != nil {
 		if errors.Is(err, util.ErrPoolNotFound) {
@@ -871,9 +868,8 @@ func (cs *ControllerServer) CreateSnapshot(
 	}
 	defer cr.DeleteCredentials()
 
-	var rbdVol = &rbdVolume{}
 	// Fetch source volume information
-	rbdVol, err = genVolFromVolID(ctx, req.GetSourceVolumeId(), cr, req.GetSecrets())
+	rbdVol, err := genVolFromVolID(ctx, req.GetSourceVolumeId(), cr, req.GetSecrets())
 	defer rbdVol.Destroy()
 	if err != nil {
 		switch {
@@ -950,9 +946,8 @@ func (cs *ControllerServer) CreateSnapshot(
 	}()
 
 	var ready bool
-	var vol = new(rbdVolume)
 
-	ready, vol, err = cs.doSnapshotClone(ctx, rbdVol, rbdSnap, cr)
+	ready, vol, err := cs.doSnapshotClone(ctx, rbdVol, rbdSnap, cr)
 	if err != nil {
 		return nil, err
 	}
@@ -1145,9 +1140,8 @@ func (cs *ControllerServer) doSnapshotClone(
 		util.ErrorLog(ctx, "failed to get image id: %v", err)
 		return ready, cloneRbd, err
 	}
-	var j = &journal.Connection{}
 	// save image ID
-	j, err = snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
+	j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
 	if err != nil {
 		util.ErrorLog(ctx, "failed to connect to cluster: %v", err)
 		return ready, cloneRbd, err
@@ -1315,8 +1309,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
 	}
 	defer cr.DeleteCredentials()
 
-	var rbdVol = &rbdVolume{}
-	rbdVol, err = genVolFromVolID(ctx, volID, cr, req.GetSecrets())
+	rbdVol, err := genVolFromVolID(ctx, volID, cr, req.GetSecrets())
 	defer rbdVol.Destroy()
 	if err != nil {
 		switch {
@@ -142,8 +142,10 @@ func (r *Driver) Run(conf *util.Config) {
 		// MULTI_NODE_SINGLE_WRITER etc, but need to do some verification of RO modes first
 		// will work those as follow up features
 		r.cd.AddVolumeCapabilityAccessModes(
-			[]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
-				csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER})
+			[]csi.VolumeCapability_AccessMode_Mode{
+				csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
+				csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
+			})
 	}
 
 	// Create GRPC servers
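The hunk above only reshapes the slice literal: once the closing brace sits on its own line, each element goes on its own line and every element, including the last, ends with a comma (Go requires the trailing comma in that layout). A minimal runnable sketch with stand-in names, since the csi package types are not needed to show the formatting:

package main

import "fmt"

// AccessMode stands in for csi.VolumeCapability_AccessMode_Mode in this sketch.
type AccessMode int

const (
	SingleNodeWriter AccessMode = iota
	MultiNodeMultiWriter
)

func main() {
	// One element per line with a trailing comma, matching the layout
	// the hunk above switches to.
	modes := []AccessMode{
		SingleNodeWriter,
		MultiNodeMultiWriter,
	}
	fmt.Println(modes)
}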
@@ -342,7 +342,7 @@ func (ns *NodeServer) stageTransaction(
 
 	if !readOnly {
 		// #nosec - allow anyone to write inside the target path
-		err = os.Chmod(stagingTargetPath, 0777)
+		err = os.Chmod(stagingTargetPath, 0o777)
 	}
 	return transaction, err
 }
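The 0o prefix, added in Go 1.13, only changes how the octal literal is written; 0777 and 0o777 are the same value, so the permission bits passed to os.Chmod are unchanged. A small standalone check:

package main

import (
	"fmt"
	"os"
)

func main() {
	// Same value, different spelling.
	fmt.Println(0777 == 0o777)           // true
	fmt.Printf("%d %o\n", 0o777, 0o777)  // 511 777
	fmt.Println(os.FileMode(0o777))      // -rwxrwxrwx
}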
@@ -400,7 +400,7 @@ func (ns *NodeServer) undoStagingTransaction(
 func (ns *NodeServer) createStageMountPoint(ctx context.Context, mountPath string, isBlock bool) error {
 	if isBlock {
 		// #nosec:G304, intentionally creating file mountPath, not a security issue
-		pathFile, err := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0600)
+		pathFile, err := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0o600)
 		if err != nil {
 			util.ErrorLog(ctx, "failed to create mountPath:%s with error: %v", mountPath, err)
 			return status.Error(codes.Internal, err.Error())
@@ -413,7 +413,7 @@ func (ns *NodeServer) createStageMountPoint(ctx context.Context, mountPath strin
 		return nil
 	}
 
-	err := os.Mkdir(mountPath, 0750)
+	err := os.Mkdir(mountPath, 0o750)
 	if err != nil {
 		if !os.IsExist(err) {
 			util.ErrorLog(ctx, "failed to create mountPath:%s with error: %v", mountPath, err)
@@ -582,7 +582,7 @@ func (ns *NodeServer) createTargetMountPath(ctx context.Context, mountPath strin
 	}
 	if isBlock {
 		// #nosec
-		pathFile, e := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0750)
+		pathFile, e := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0o750)
 		if e != nil {
 			util.DebugLog(ctx, "Failed to create mountPath:%s with error: %v", mountPath, err)
 			return notMnt, status.Error(codes.Internal, e.Error())
@@ -362,9 +362,7 @@ func (rv *rbdVolume) repairImageID(ctx context.Context, j *journal.Connection) e
 // reserveSnap is a helper routine to request a rbdSnapshot name reservation and generate the
 // volume ID for the generated name.
 func reserveSnap(ctx context.Context, rbdSnap *rbdSnapshot, rbdVol *rbdVolume, cr *util.Credentials) error {
-	var (
-		err error
-	)
+	var err error
 
 	journalPoolID, imagePoolID, err := util.GetPoolIDs(ctx, rbdSnap.Monitors, rbdSnap.JournalPool, rbdSnap.Pool, cr)
 	if err != nil {
@@ -435,9 +433,7 @@ func updateTopologyConstraints(rbdVol *rbdVolume, rbdSnap *rbdSnapshot) error {
 // reserveVol is a helper routine to request a rbdVolume name reservation and generate the
 // volume ID for the generated name.
 func reserveVol(ctx context.Context, rbdVol *rbdVolume, rbdSnap *rbdSnapshot, cr *util.Credentials) error {
-	var (
-		err error
-	)
+	var err error
 
 	err = updateTopologyConstraints(rbdVol, rbdSnap)
 	if err != nil {
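In both hunks err stays pre-declared with var err error even though a later statement in reserveSnap uses :=; that is valid Go because := only requires at least one new variable on its left-hand side (journalPoolID and imagePoolID are new), while err is simply assigned. A minimal sketch with a hypothetical helper standing in for util.GetPoolIDs:

package main

import (
	"errors"
	"fmt"
)

// lookupIDs is a stand-in for util.GetPoolIDs in this sketch.
func lookupIDs() (int64, int64, error) {
	return 1, 2, errors.New("example error")
}

func main() {
	var err error

	// := is allowed even though err already exists, because journalPoolID
	// and imagePoolID are new variables in this scope; err is reassigned.
	journalPoolID, imagePoolID, err := lookupIDs()
	fmt.Println(journalPoolID, imagePoolID, err)
}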
@@ -165,20 +165,18 @@ type imageFeature struct {
 	dependsOn []string
 }
 
-var (
-	supportedFeatures = map[string]imageFeature{
-		librbd.FeatureNameLayering: {
-			needRbdNbd: false,
-		},
-		librbd.FeatureNameExclusiveLock: {
-			needRbdNbd: true,
-		},
-		librbd.FeatureNameJournaling: {
-			needRbdNbd: true,
-			dependsOn:  []string{librbd.FeatureNameExclusiveLock},
-		},
-	}
-)
+var supportedFeatures = map[string]imageFeature{
+	librbd.FeatureNameLayering: {
+		needRbdNbd: false,
+	},
+	librbd.FeatureNameExclusiveLock: {
+		needRbdNbd: true,
+	},
+	librbd.FeatureNameJournaling: {
+		needRbdNbd: true,
+		dependsOn:  []string{librbd.FeatureNameExclusiveLock},
+	},
+}
 
 // Connect an rbdVolume to the Ceph cluster.
 func (ri *rbdImage) Connect(cr *util.Credentials) error {
@@ -491,7 +489,6 @@ func addRbdManagerTask(ctx context.Context, pOpts *rbdVolume, arg []string) (boo
 		pOpts.Pool)
 	supported := true
 	_, stderr, err := util.ExecCommand(ctx, "ceph", args...)
-
 	if err != nil {
 		switch {
 		case strings.Contains(stderr, rbdTaskRemoveCmdInvalidString1) &&
@@ -547,7 +544,8 @@ func deleteImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
 	}
 
 	// attempt to use Ceph manager based deletion support if available
-	args := []string{"trash", "remove",
+	args := []string{
+		"trash", "remove",
 		pOpts.Pool + "/" + pOpts.ImageID,
 		"--id", cr.ID,
 		"--keyfile=" + cr.KeyFile,
@@ -1398,7 +1396,7 @@ func (ri *rbdImageMetadataStash) String() string {
 // stashRBDImageMetadata stashes required fields into the stashFileName at the passed in path, in
 // JSON format.
 func stashRBDImageMetadata(volOptions *rbdVolume, path string) error {
-	var imgMeta = rbdImageMetadataStash{
+	imgMeta := rbdImageMetadataStash{
 		// there are no checks for this at present
 		Version: 3, // nolint:gomnd // number specifies version.
 		Pool:    volOptions.Pool,
@@ -1419,7 +1417,7 @@ func stashRBDImageMetadata(volOptions *rbdVolume, path string) error {
 	}
 
 	fPath := filepath.Join(path, stashFileName)
-	err = ioutil.WriteFile(fPath, encodedBytes, 0600)
+	err = ioutil.WriteFile(fPath, encodedBytes, 0o600)
 	if err != nil {
 		return fmt.Errorf("failed to stash JSON image metadata for image (%s) at path (%s): %w", volOptions, fPath, err)
 	}
|
||||
// validateSchedulingInterval return the interval as it is if its ending with
|
||||
// `m|h|d` or else it will return error.
|
||||
func validateSchedulingInterval(interval string) (admin.Interval, error) {
|
||||
var re = regexp.MustCompile(`^\d+[mhd]$`)
|
||||
re := regexp.MustCompile(`^\d+[mhd]$`)
|
||||
if re.MatchString(interval) {
|
||||
return admin.Interval(interval), nil
|
||||
}
|
||||
|
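The only change in the last hunk is the short variable declaration for the compiled regexp; the validation behaviour is unchanged. A small standalone check of the same pattern, which accepts intervals such as "5m", "12h" or "7d" and rejects anything else (the sample inputs are illustrative):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`^\d+[mhd]$`)
	for _, interval := range []string{"5m", "12h", "7d", "30s", "h"} {
		// Only a run of digits followed by m, h or d matches the pattern.
		fmt.Printf("%-4s valid=%v\n", interval, re.MatchString(interval))
	}
}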