cleanup: resolves gofumpt issues in internal code

This PR runs gofumpt on the internal folder.
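
For reviewers unfamiliar with gofumpt, the hunks below are mostly three mechanical rewrites: octal literals gain the `0o` prefix, declaration groups with a single element are unwrapped (while runs of adjacent single `var` lines are grouped), and `var x = y` inside functions becomes `x := y`. A minimal illustrative sketch of the before/after shape (the identifiers here are made up for the example, not taken from this PR):

```go
// Illustrative only; none of these identifiers come from the ceph-csi code.
package main

import (
	"fmt"
	"os"
)

// Before: a single constant wrapped in a parenthesized group, old octal form:
//
//	const (
//		modeAllRWX = 0777
//	)
//
// After: the group is collapsed and the 0o prefix is used.
const modeAllRWX = 0o777

func main() {
	// Before: var path = "/tmp/gofumpt-example"
	// After:  a short variable declaration.
	path := "/tmp/gofumpt-example"

	// File modes use the 0o octal prefix as well.
	if err := os.MkdirAll(path, 0o750); err != nil {
		fmt.Printf("mkdir failed: %v\n", err)
		return
	}
	if err := os.Chmod(path, modeAllRWX); err != nil {
		fmt.Printf("chmod failed: %v\n", err)
	}
}
```

The diff was presumably produced by running something like `gofumpt -w ./internal` (the exact invocation is not recorded in this PR).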

Updates: #1586

Signed-off-by: Yati Padia <ypadia@redhat.com>
Authored by Yati Padia on 2021-07-13 17:51:05 +05:30; committed by mergify[bot]
parent 696ee496fc
commit f36d611ef9
25 changed files with 115 additions and 124 deletions

View File

@ -70,9 +70,7 @@ func getCredentialsForVolume(volOptions *volumeOptions, req *csi.NodeStageVolume
func (ns *NodeServer) NodeStageVolume(
ctx context.Context,
req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
var (
volOptions *volumeOptions
)
var volOptions *volumeOptions
if err := util.ValidateNodeStageVolumeRequest(req); err != nil {
return nil, err
}
@ -114,7 +112,6 @@ func (ns *NodeServer) NodeStageVolume(
// Check if the volume is already mounted
isMnt, err := util.IsMountPoint(stagingTargetPath)
if err != nil {
util.ErrorLog(ctx, "stat failed: %v", err)
return nil, status.Error(codes.Internal, err.Error())
@ -184,7 +181,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
if !csicommon.MountOptionContains(kernelMountOptions, readOnly) &&
!csicommon.MountOptionContains(fuseMountOptions, readOnly) {
// #nosec - allow anyone to write inside the stagingtarget path
err = os.Chmod(stagingTargetPath, 0777)
err = os.Chmod(stagingTargetPath, 0o777)
if err != nil {
util.ErrorLog(
ctx,
@ -240,7 +237,6 @@ func (ns *NodeServer) NodePublishVolume(
// Check if the volume is already mounted
isMnt, err := util.IsMountPoint(targetPath)
if err != nil {
util.ErrorLog(ctx, "stat failed: %v", err)
return nil, status.Error(codes.Internal, err.Error())

View File

@ -29,21 +29,19 @@ import (
"github.com/ceph/go-ceph/rados"
)
var (
// clusterAdditionalInfo contains information regarding if resize is
// supported in the particular cluster and subvolumegroup is
// created or not.
// Subvolumegroup creation and volume resize decisions are
// taken through this additional cluster information.
clusterAdditionalInfo = make(map[string]*localClusterState)
)
// clusterAdditionalInfo contains information regarding if resize is
// supported in the particular cluster and subvolumegroup is
// created or not.
// Subvolumegroup creation and volume resize decisions are
// taken through this additional cluster information.
var clusterAdditionalInfo = make(map[string]*localClusterState)
const (
cephEntityClientPrefix = "client."
// modeAllRWX can be used for setting permissions to Read-Write-eXecute
// for User, Group and Other.
modeAllRWX = 0777
modeAllRWX = 0o777
)
// Subvolume holds subvolume information. This includes only the needed members

View File

@ -181,7 +181,6 @@ func newVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVo
volOptions := req.GetParameters()
clusterData, err := getClusterInformation(volOptions)
if err != nil {
return nil, err
}
@ -445,7 +444,6 @@ func newVolumeOptionsFromStaticVolume(
opts.ProvisionVolume = !staticVol
clusterData, err := getClusterInformation(options)
if err != nil {
return nil, nil, err
}

View File

@ -44,8 +44,10 @@ type ReconcilePersistentVolume struct {
Locks *util.VolumeLocks
}
var _ reconcile.Reconciler = &ReconcilePersistentVolume{}
var _ ctrl.ContollerManager = &ReconcilePersistentVolume{}
var (
_ reconcile.Reconciler = &ReconcilePersistentVolume{}
_ ctrl.ContollerManager = &ReconcilePersistentVolume{}
)
// Init will add the ReconcilePersistentVolume to the list.
func Init() {

View File

@ -205,7 +205,6 @@ func panicHandler(
// this.
func FilesystemNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.NodeGetVolumeStatsResponse, error) {
isMnt, err := util.IsMountPoint(targetPath)
if err != nil {
if os.IsNotExist(err) {
return nil, status.Errorf(codes.InvalidArgument, "targetpath %s does not exist", targetPath)

View File

@ -734,7 +734,7 @@ func (conn *Connection) Destroy() {
// volumeHandle and the newly generated volumeHandle.
func (conn *Connection) CheckNewUUIDMapping(ctx context.Context,
journalPool, volumeHandle string) (string, error) {
var cj = conn.config
cj := conn.config
// check if request name is already part of the directory omap
fetchKeys := []string{
@ -762,7 +762,7 @@ func (conn *Connection) CheckNewUUIDMapping(ctx context.Context,
// internal reference.
func (conn *Connection) ReserveNewUUIDMapping(ctx context.Context,
journalPool, oldVolumeHandle, newVolumeHandle string) error {
var cj = conn.config
cj := conn.config
setKeys := map[string]string{
cj.csiNameKeyPrefix + oldVolumeHandle: newVolumeHandle,

View File

@ -29,13 +29,11 @@ import (
"google.golang.org/grpc"
)
var (
liveness = prometheus.NewGauge(prometheus.GaugeOpts{
var liveness = prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: "csi",
Name: "liveness",
Help: "Liveness Probe",
})
)
})
func getLiveness(timeout time.Duration, csiConn *grpc.ClientConn) {
ctx, cancel := context.WithTimeout(context.Background(), timeout)

View File

@ -22,7 +22,6 @@ import (
"strconv"
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/journal"
"github.com/ceph/ceph-csi/internal/util"
librbd "github.com/ceph/go-ceph/rbd"
@ -600,8 +599,7 @@ func (cs *ControllerServer) createBackingImage(
rbdSnap *rbdSnapshot) error {
var err error
var j = &journal.Connection{}
j, err = volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)
j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)
if err != nil {
return status.Error(codes.Internal, err.Error())
}
@ -747,8 +745,7 @@ func (cs *ControllerServer) DeleteVolume(
}
defer cs.OperationLocks.ReleaseDeleteLock(volumeID)
var rbdVol = &rbdVolume{}
rbdVol, err = genVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
rbdVol, err := genVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
defer rbdVol.Destroy()
if err != nil {
if errors.Is(err, util.ErrPoolNotFound) {
@ -871,9 +868,8 @@ func (cs *ControllerServer) CreateSnapshot(
}
defer cr.DeleteCredentials()
var rbdVol = &rbdVolume{}
// Fetch source volume information
rbdVol, err = genVolFromVolID(ctx, req.GetSourceVolumeId(), cr, req.GetSecrets())
rbdVol, err := genVolFromVolID(ctx, req.GetSourceVolumeId(), cr, req.GetSecrets())
defer rbdVol.Destroy()
if err != nil {
switch {
@ -950,9 +946,8 @@ func (cs *ControllerServer) CreateSnapshot(
}()
var ready bool
var vol = new(rbdVolume)
ready, vol, err = cs.doSnapshotClone(ctx, rbdVol, rbdSnap, cr)
ready, vol, err := cs.doSnapshotClone(ctx, rbdVol, rbdSnap, cr)
if err != nil {
return nil, err
}
@ -1145,9 +1140,8 @@ func (cs *ControllerServer) doSnapshotClone(
util.ErrorLog(ctx, "failed to get image id: %v", err)
return ready, cloneRbd, err
}
var j = &journal.Connection{}
// save image ID
j, err = snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
if err != nil {
util.ErrorLog(ctx, "failed to connect to cluster: %v", err)
return ready, cloneRbd, err
@ -1315,8 +1309,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
}
defer cr.DeleteCredentials()
var rbdVol = &rbdVolume{}
rbdVol, err = genVolFromVolID(ctx, volID, cr, req.GetSecrets())
rbdVol, err := genVolFromVolID(ctx, volID, cr, req.GetSecrets())
defer rbdVol.Destroy()
if err != nil {
switch {

View File

@ -142,8 +142,10 @@ func (r *Driver) Run(conf *util.Config) {
// MULTI_NODE_SINGLE_WRITER etc, but need to do some verification of RO modes first
// will work those as follow up features
r.cd.AddVolumeCapabilityAccessModes(
[]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER})
[]csi.VolumeCapability_AccessMode_Mode{
csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
})
}
// Create GRPC servers

View File

@ -342,7 +342,7 @@ func (ns *NodeServer) stageTransaction(
if !readOnly {
// #nosec - allow anyone to write inside the target path
err = os.Chmod(stagingTargetPath, 0777)
err = os.Chmod(stagingTargetPath, 0o777)
}
return transaction, err
}
@ -400,7 +400,7 @@ func (ns *NodeServer) undoStagingTransaction(
func (ns *NodeServer) createStageMountPoint(ctx context.Context, mountPath string, isBlock bool) error {
if isBlock {
// #nosec:G304, intentionally creating file mountPath, not a security issue
pathFile, err := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0600)
pathFile, err := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0o600)
if err != nil {
util.ErrorLog(ctx, "failed to create mountPath:%s with error: %v", mountPath, err)
return status.Error(codes.Internal, err.Error())
@ -413,7 +413,7 @@ func (ns *NodeServer) createStageMountPoint(ctx context.Context, mountPath strin
return nil
}
err := os.Mkdir(mountPath, 0750)
err := os.Mkdir(mountPath, 0o750)
if err != nil {
if !os.IsExist(err) {
util.ErrorLog(ctx, "failed to create mountPath:%s with error: %v", mountPath, err)
@ -582,7 +582,7 @@ func (ns *NodeServer) createTargetMountPath(ctx context.Context, mountPath strin
}
if isBlock {
// #nosec
pathFile, e := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0750)
pathFile, e := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0o750)
if e != nil {
util.DebugLog(ctx, "Failed to create mountPath:%s with error: %v", mountPath, err)
return notMnt, status.Error(codes.Internal, e.Error())

View File

@ -362,9 +362,7 @@ func (rv *rbdVolume) repairImageID(ctx context.Context, j *journal.Connection) e
// reserveSnap is a helper routine to request a rbdSnapshot name reservation and generate the
// volume ID for the generated name.
func reserveSnap(ctx context.Context, rbdSnap *rbdSnapshot, rbdVol *rbdVolume, cr *util.Credentials) error {
var (
err error
)
var err error
journalPoolID, imagePoolID, err := util.GetPoolIDs(ctx, rbdSnap.Monitors, rbdSnap.JournalPool, rbdSnap.Pool, cr)
if err != nil {
@ -435,9 +433,7 @@ func updateTopologyConstraints(rbdVol *rbdVolume, rbdSnap *rbdSnapshot) error {
// reserveVol is a helper routine to request a rbdVolume name reservation and generate the
// volume ID for the generated name.
func reserveVol(ctx context.Context, rbdVol *rbdVolume, rbdSnap *rbdSnapshot, cr *util.Credentials) error {
var (
err error
)
var err error
err = updateTopologyConstraints(rbdVol, rbdSnap)
if err != nil {

View File

@ -165,8 +165,7 @@ type imageFeature struct {
dependsOn []string
}
var (
supportedFeatures = map[string]imageFeature{
var supportedFeatures = map[string]imageFeature{
librbd.FeatureNameLayering: {
needRbdNbd: false,
},
@ -177,8 +176,7 @@ var (
needRbdNbd: true,
dependsOn: []string{librbd.FeatureNameExclusiveLock},
},
}
)
}
// Connect an rbdVolume to the Ceph cluster.
func (ri *rbdImage) Connect(cr *util.Credentials) error {
@ -491,7 +489,6 @@ func addRbdManagerTask(ctx context.Context, pOpts *rbdVolume, arg []string) (boo
pOpts.Pool)
supported := true
_, stderr, err := util.ExecCommand(ctx, "ceph", args...)
if err != nil {
switch {
case strings.Contains(stderr, rbdTaskRemoveCmdInvalidString1) &&
@ -547,7 +544,8 @@ func deleteImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
}
// attempt to use Ceph manager based deletion support if available
args := []string{"trash", "remove",
args := []string{
"trash", "remove",
pOpts.Pool + "/" + pOpts.ImageID,
"--id", cr.ID,
"--keyfile=" + cr.KeyFile,
@ -1398,7 +1396,7 @@ func (ri *rbdImageMetadataStash) String() string {
// stashRBDImageMetadata stashes required fields into the stashFileName at the passed in path, in
// JSON format.
func stashRBDImageMetadata(volOptions *rbdVolume, path string) error {
var imgMeta = rbdImageMetadataStash{
imgMeta := rbdImageMetadataStash{
// there are no checks for this at present
Version: 3, // nolint:gomnd // number specifies version.
Pool: volOptions.Pool,
@ -1419,7 +1417,7 @@ func stashRBDImageMetadata(volOptions *rbdVolume, path string) error {
}
fPath := filepath.Join(path, stashFileName)
err = ioutil.WriteFile(fPath, encodedBytes, 0600)
err = ioutil.WriteFile(fPath, encodedBytes, 0o600)
if err != nil {
return fmt.Errorf("failed to stash JSON image metadata for image (%s) at path (%s): %w", volOptions, fPath, err)
}

View File

@ -165,7 +165,7 @@ func getSchedulingDetails(parameters map[string]string) (admin.Interval, admin.S
// validateSchedulingInterval return the interval as it is if its ending with
// `m|h|d` or else it will return error.
func validateSchedulingInterval(interval string) (admin.Interval, error) {
var re = regexp.MustCompile(`^\d+[mhd]$`)
re := regexp.MustCompile(`^\d+[mhd]$`)
if re.MatchString(interval) {
return admin.Interval(interval), nil
}

View File

@ -44,7 +44,7 @@ const (
)
func createCephConfigRoot() error {
return os.MkdirAll(cephConfigRoot, 0755) // #nosec
return os.MkdirAll(cephConfigRoot, 0o755) // #nosec
}
// WriteCephConfig writes out a basic ceph.conf file, making it easy to use
@ -54,7 +54,7 @@ func WriteCephConfig() error {
return err
}
err := ioutil.WriteFile(CephConfigPath, cephConfig, 0600)
err := ioutil.WriteFile(CephConfigPath, cephConfig, 0o600)
if err != nil {
return err
}

View File

@ -80,7 +80,7 @@ func TestConnPool(t *testing.T) {
// create a keyfile with some contents
keyfile := "/tmp/conn_utils.keyfile"
err := ioutil.WriteFile(keyfile, []byte("the-key"), 0600)
err := ioutil.WriteFile(keyfile, []byte("the-key"), 0o600)
if err != nil {
t.Errorf("failed to create keyfile: %v", err)
return

View File

@ -18,13 +18,12 @@ package util
import (
"context"
"crypto/rand"
"encoding/base64"
"errors"
"fmt"
"path"
"strings"
"crypto/rand"
)
const (

View File

@ -22,11 +22,13 @@ import (
"testing"
)
var basePath = "./test_artifacts"
var csiClusters = "csi-clusters.json"
var pathToConfig = basePath + "/" + csiClusters
var clusterID1 = "test1"
var clusterID2 = "test2"
var (
basePath = "./test_artifacts"
csiClusters = "csi-clusters.json"
pathToConfig = basePath + "/" + csiClusters
clusterID1 = "test1"
clusterID2 = "test2"
)
func cleanupTestData() {
os.RemoveAll(basePath)
@ -41,7 +43,7 @@ func TestCSIConfig(t *testing.T) {
defer cleanupTestData()
err = os.MkdirAll(basePath, 0700)
err = os.MkdirAll(basePath, 0o700)
if err != nil {
t.Errorf("Test setup error %s", err)
}
@ -53,7 +55,7 @@ func TestCSIConfig(t *testing.T) {
}
data = ""
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0600)
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0o600)
if err != nil {
t.Errorf("Test setup error %s", err)
}
@ -65,7 +67,7 @@ func TestCSIConfig(t *testing.T) {
}
data = "[{\"clusterIDBad\":\"" + clusterID2 + "\",\"monitors\":[\"mon1\",\"mon2\",\"mon3\"]}]"
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0600)
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0o600)
if err != nil {
t.Errorf("Test setup error %s", err)
}
@ -77,7 +79,7 @@ func TestCSIConfig(t *testing.T) {
}
data = "[{\"clusterID\":\"" + clusterID2 + "\",\"monitorsBad\":[\"mon1\",\"mon2\",\"mon3\"]}]"
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0600)
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0o600)
if err != nil {
t.Errorf("Test setup error %s", err)
}
@ -89,7 +91,7 @@ func TestCSIConfig(t *testing.T) {
}
data = "[{\"clusterID\":\"" + clusterID2 + "\",\"monitors\":[\"mon1\",2,\"mon3\"]}]"
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0600)
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0o600)
if err != nil {
t.Errorf("Test setup error %s", err)
}
@ -101,7 +103,7 @@ func TestCSIConfig(t *testing.T) {
}
data = "[{\"clusterID\":\"" + clusterID2 + "\",\"monitors\":[\"mon1\",\"mon2\",\"mon3\"]}]"
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0600)
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0o600)
if err != nil {
t.Errorf("Test setup error %s", err)
}
@ -120,7 +122,7 @@ func TestCSIConfig(t *testing.T) {
data = "[{\"clusterID\":\"" + clusterID2 + "\",\"monitors\":[\"mon1\",\"mon2\",\"mon3\"]}," +
"{\"clusterID\":\"" + clusterID1 + "\",\"monitors\":[\"mon4\",\"mon5\",\"mon6\"]}]"
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0600)
err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0o600)
if err != nil {
t.Errorf("Test setup error %s", err)
}

View File

@ -26,6 +26,7 @@ func StartMetricsServer(c *Config) {
FatalLogMsg("failed to listen on address %v: %s", addr, err)
}
}
func addPath(name string, handler http.Handler) {
http.Handle(name, handler)
DebugLogMsg("DEBUG: registered profiling handler on /debug/pprof/%s\n", name)

View File

@ -57,7 +57,6 @@ func TestOperationLocks(t *testing.T) {
volumeID := "test-vol"
lock := NewOperationLock()
err := lock.GetCloneLock(volumeID)
if err != nil {
t.Errorf("failed to acquire clone lock for %s %s", volumeID, err)
}

View File

@ -31,7 +31,6 @@ func TestGetPIDLimit(t *testing.T) {
}
limit, err := GetPIDLimit()
if err != nil {
t.Errorf("no error should be returned, got: %v", err)
}

View File

@ -133,9 +133,7 @@ type TopologyConstrainedPool struct {
// from a CSI CreateVolume request.
func GetTopologyFromRequest(
req *csi.CreateVolumeRequest) (*[]TopologyConstrainedPool, *csi.TopologyRequirement, error) {
var (
topologyPools []TopologyConstrainedPool
)
var topologyPools []TopologyConstrainedPool
// check if parameters have pool configuration pertaining to topology
topologyPoolsStr := req.GetParameters()["topologyConstrainedPools"]

View File

@ -41,17 +41,17 @@ func checkAndReportError(t *testing.T, msg string, err error) {
func TestFindPoolAndTopology(t *testing.T) {
t.Parallel()
var err error
var label1 = "region"
var label2 = "zone"
var l1Value1 = "R1"
var l1Value2 = "R2"
var l2Value1 = "Z1"
var l2Value2 = "Z2"
var pool1 = "PoolA"
var pool2 = "PoolB"
var topologyPrefix = "prefix"
var emptyTopoPools = []TopologyConstrainedPool{}
var emptyPoolNameTopoPools = []TopologyConstrainedPool{
label1 := "region"
label2 := "zone"
l1Value1 := "R1"
l1Value2 := "R2"
l2Value1 := "Z1"
l2Value2 := "Z2"
pool1 := "PoolA"
pool2 := "PoolB"
topologyPrefix := "prefix"
emptyTopoPools := []TopologyConstrainedPool{}
emptyPoolNameTopoPools := []TopologyConstrainedPool{
{
DomainSegments: []topologySegment{
{
@ -65,12 +65,12 @@ func TestFindPoolAndTopology(t *testing.T) {
},
},
}
var emptyDomainsInTopoPools = []TopologyConstrainedPool{
emptyDomainsInTopoPools := []TopologyConstrainedPool{
{
PoolName: pool1,
},
}
var partialDomainsInTopoPools = []TopologyConstrainedPool{
partialDomainsInTopoPools := []TopologyConstrainedPool{
{
PoolName: pool1,
DomainSegments: []topologySegment{
@ -81,7 +81,7 @@ func TestFindPoolAndTopology(t *testing.T) {
},
},
}
var differentDomainsInTopoPools = []TopologyConstrainedPool{
differentDomainsInTopoPools := []TopologyConstrainedPool{
{
PoolName: pool1,
DomainSegments: []topologySegment{
@ -109,7 +109,7 @@ func TestFindPoolAndTopology(t *testing.T) {
},
},
}
var validSingletonTopoPools = []TopologyConstrainedPool{
validSingletonTopoPools := []TopologyConstrainedPool{
{
PoolName: pool1,
DomainSegments: []topologySegment{
@ -124,7 +124,7 @@ func TestFindPoolAndTopology(t *testing.T) {
},
},
}
var validMultipleTopoPools = []TopologyConstrainedPool{
validMultipleTopoPools := []TopologyConstrainedPool{
{
PoolName: pool1,
DomainSegments: []topologySegment{
@ -152,14 +152,14 @@ func TestFindPoolAndTopology(t *testing.T) {
},
},
}
var emptyAccReq = csi.TopologyRequirement{}
var emptySegmentAccReq = csi.TopologyRequirement{
emptyAccReq := csi.TopologyRequirement{}
emptySegmentAccReq := csi.TopologyRequirement{
Requisite: []*csi.Topology{
{},
{},
},
}
var partialHigherSegmentAccReq = csi.TopologyRequirement{
partialHigherSegmentAccReq := csi.TopologyRequirement{
Preferred: []*csi.Topology{
{
Segments: map[string]string{
@ -168,7 +168,7 @@ func TestFindPoolAndTopology(t *testing.T) {
},
},
}
var partialLowerSegmentAccReq = csi.TopologyRequirement{
partialLowerSegmentAccReq := csi.TopologyRequirement{
Preferred: []*csi.Topology{
{
Segments: map[string]string{
@ -177,7 +177,7 @@ func TestFindPoolAndTopology(t *testing.T) {
},
},
}
var differentSegmentAccReq = csi.TopologyRequirement{
differentSegmentAccReq := csi.TopologyRequirement{
Requisite: []*csi.Topology{
{
Segments: map[string]string{
@ -193,7 +193,7 @@ func TestFindPoolAndTopology(t *testing.T) {
},
},
}
var validAccReq = csi.TopologyRequirement{
validAccReq := csi.TopologyRequirement{
Requisite: []*csi.Topology{
{
Segments: map[string]string{

View File

@ -273,7 +273,7 @@ func GenerateVolID(
// CreateMountPoint creates the directory with given path.
func CreateMountPoint(mountPath string) error {
return os.MkdirAll(mountPath, 0750)
return os.MkdirAll(mountPath, 0o750)
}
// checkDirExists checks directory exists or not.

View File

@ -256,10 +256,21 @@ func TestParseKernelRelease(t *testing.T) {
}
}
goodReleases := []string{"5.12", "5.12xlinux", "5.1-2-yam", "3.1-5-x", "5.12.14", "5.12.14xlinux",
"5.12.14-xlinux", "5.12.14-99-x", "3.3x-3"}
goodVersions := [][]int{{5, 12, 0, 0}, {5, 12, 0, 0}, {5, 1, 0, 2}, {3, 1, 0, 5},
{5, 12, 14, 0}, {5, 12, 14, 0}, {5, 12, 14, 0}, {5, 12, 14, 99}, {3, 3, 0, 0}}
goodReleases := []string{
"5.12", "5.12xlinux", "5.1-2-yam", "3.1-5-x", "5.12.14", "5.12.14xlinux",
"5.12.14-xlinux", "5.12.14-99-x", "3.3x-3",
}
goodVersions := [][]int{
{5, 12, 0, 0},
{5, 12, 0, 0},
{5, 1, 0, 2},
{3, 1, 0, 5},
{5, 12, 14, 0},
{5, 12, 14, 0},
{5, 12, 14, 0},
{5, 12, 14, 99},
{3, 3, 0, 0},
}
for i, release := range goodReleases {
version, patchlevel, sublevel, extraversion, err := parseKernelRelease(release)
if err != nil {

View File

@ -90,8 +90,10 @@ func (ci CSIIdentifier) ComposeCSIID() (string, error) {
binary.BigEndian.PutUint64(buf64, uint64(ci.LocationID))
poolIDEncodedHex := hex.EncodeToString(buf64)
return strings.Join([]string{versionEncodedHex, clusterIDLength, ci.ClusterID,
poolIDEncodedHex, ci.ObjectUUID}, "-"), nil
return strings.Join([]string{
versionEncodedHex, clusterIDLength, ci.ClusterID,
poolIDEncodedHex, ci.ObjectUUID,
}, "-"), nil
}
/*