cleanup: resolves gofumpt issues in internal code

This PR runs gofumpt on the internal folder.
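For reference, the change is mechanical: gofumpt is typically run as something like `gofumpt -w internal` and rewrites the files in place. Below is a minimal illustrative sketch (not code from this PR; the path and values are made up) of the style it enforces, matching the rewrites visible in the diff:

    package main

    import (
        "fmt"
        "os"
    )

    // A declaration group with a single spec, e.g. var ( root = ... ),
    // is collapsed into a one-line var declaration.
    var root = "/tmp/gofumpt-example"

    func writeExample() error {
        // Octal literals take the 0o prefix (0755 becomes 0o755), and the
        // blank line before a simple error check is removed.
        err := os.MkdirAll(root, 0o755)
        if err != nil {
            return err
        }
        return nil
    }

    func main() {
        if err := writeExample(); err != nil {
            fmt.Println(err)
        }
    }

Beyond the gofumpt rules, the diff also replaces `var x = y` with `x := y` inside function bodies and splits long composite literals onto one element per line.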

Updates: #1586

Signed-off-by: Yati Padia <ypadia@redhat.com>
Yati Padia 2021-07-13 17:51:05 +05:30 committed by mergify[bot]
parent 696ee496fc
commit f36d611ef9
25 changed files with 115 additions and 124 deletions

View File

@@ -70,9 +70,7 @@ func getCredentialsForVolume(volOptions *volumeOptions, req *csi.NodeStageVolume
 func (ns *NodeServer) NodeStageVolume(
 	ctx context.Context,
 	req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
-	var (
-		volOptions *volumeOptions
-	)
+	var volOptions *volumeOptions
 	if err := util.ValidateNodeStageVolumeRequest(req); err != nil {
 		return nil, err
 	}
@@ -114,7 +112,6 @@ func (ns *NodeServer) NodeStageVolume(
 	// Check if the volume is already mounted
 	isMnt, err := util.IsMountPoint(stagingTargetPath)
-
 	if err != nil {
 		util.ErrorLog(ctx, "stat failed: %v", err)
 		return nil, status.Error(codes.Internal, err.Error())
@@ -184,7 +181,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
 	if !csicommon.MountOptionContains(kernelMountOptions, readOnly) &&
 		!csicommon.MountOptionContains(fuseMountOptions, readOnly) {
 		// #nosec - allow anyone to write inside the stagingtarget path
-		err = os.Chmod(stagingTargetPath, 0777)
+		err = os.Chmod(stagingTargetPath, 0o777)
 		if err != nil {
 			util.ErrorLog(
 				ctx,
@@ -240,7 +237,6 @@ func (ns *NodeServer) NodePublishVolume(
 	// Check if the volume is already mounted
 	isMnt, err := util.IsMountPoint(targetPath)
-
 	if err != nil {
 		util.ErrorLog(ctx, "stat failed: %v", err)
 		return nil, status.Error(codes.Internal, err.Error())

View File

@@ -29,21 +29,19 @@ import (
 	"github.com/ceph/go-ceph/rados"
 )

-var (
-	// clusterAdditionalInfo contains information regarding if resize is
-	// supported in the particular cluster and subvolumegroup is
-	// created or not.
-	// Subvolumegroup creation and volume resize decisions are
-	// taken through this additional cluster information.
-	clusterAdditionalInfo = make(map[string]*localClusterState)
-)
+// clusterAdditionalInfo contains information regarding if resize is
+// supported in the particular cluster and subvolumegroup is
+// created or not.
+// Subvolumegroup creation and volume resize decisions are
+// taken through this additional cluster information.
+var clusterAdditionalInfo = make(map[string]*localClusterState)

 const (
 	cephEntityClientPrefix = "client."
 	// modeAllRWX can be used for setting permissions to Read-Write-eXecute
 	// for User, Group and Other.
-	modeAllRWX = 0777
+	modeAllRWX = 0o777
 )

 // Subvolume holds subvolume information. This includes only the needed members

View File

@@ -181,7 +181,6 @@ func newVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVo
 	volOptions := req.GetParameters()
 	clusterData, err := getClusterInformation(volOptions)
-
 	if err != nil {
 		return nil, err
 	}
@@ -445,7 +444,6 @@ func newVolumeOptionsFromStaticVolume(
 	opts.ProvisionVolume = !staticVol
 	clusterData, err := getClusterInformation(options)
-
 	if err != nil {
 		return nil, nil, err
 	}

View File

@@ -44,8 +44,10 @@ type ReconcilePersistentVolume struct {
 	Locks  *util.VolumeLocks
 }

-var _ reconcile.Reconciler = &ReconcilePersistentVolume{}
-var _ ctrl.ContollerManager = &ReconcilePersistentVolume{}
+var (
+	_ reconcile.Reconciler   = &ReconcilePersistentVolume{}
+	_ ctrl.ContollerManager  = &ReconcilePersistentVolume{}
+)

 // Init will add the ReconcilePersistentVolume to the list.
 func Init() {

View File

@@ -205,7 +205,6 @@ func panicHandler(
 // this.
 func FilesystemNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.NodeGetVolumeStatsResponse, error) {
 	isMnt, err := util.IsMountPoint(targetPath)
-
 	if err != nil {
 		if os.IsNotExist(err) {
 			return nil, status.Errorf(codes.InvalidArgument, "targetpath %s does not exist", targetPath)

View File

@@ -734,7 +734,7 @@ func (conn *Connection) Destroy() {
 // volumeHandle and the newly generated volumeHandle.
 func (conn *Connection) CheckNewUUIDMapping(ctx context.Context,
 	journalPool, volumeHandle string) (string, error) {
-	var cj = conn.config
+	cj := conn.config

 	// check if request name is already part of the directory omap
 	fetchKeys := []string{
@@ -762,7 +762,7 @@ func (conn *Connection) CheckNewUUIDMapping(ctx context.Context,
 // internal reference.
 func (conn *Connection) ReserveNewUUIDMapping(ctx context.Context,
 	journalPool, oldVolumeHandle, newVolumeHandle string) error {
-	var cj = conn.config
+	cj := conn.config

 	setKeys := map[string]string{
 		cj.csiNameKeyPrefix + oldVolumeHandle: newVolumeHandle,

View File

@@ -29,13 +29,11 @@ import (
 	"google.golang.org/grpc"
 )

-var (
-	liveness = prometheus.NewGauge(prometheus.GaugeOpts{
-		Namespace: "csi",
-		Name:      "liveness",
-		Help:      "Liveness Probe",
-	})
-)
+var liveness = prometheus.NewGauge(prometheus.GaugeOpts{
+	Namespace: "csi",
+	Name:      "liveness",
+	Help:      "Liveness Probe",
+})

 func getLiveness(timeout time.Duration, csiConn *grpc.ClientConn) {
 	ctx, cancel := context.WithTimeout(context.Background(), timeout)

View File

@@ -22,7 +22,6 @@ import (
 	"strconv"

 	csicommon "github.com/ceph/ceph-csi/internal/csi-common"
-	"github.com/ceph/ceph-csi/internal/journal"
 	"github.com/ceph/ceph-csi/internal/util"

 	librbd "github.com/ceph/go-ceph/rbd"
@@ -600,8 +599,7 @@ func (cs *ControllerServer) createBackingImage(
 	rbdSnap *rbdSnapshot) error {
 	var err error

-	var j = &journal.Connection{}
-	j, err = volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)
+	j, err := volJournal.Connect(rbdVol.Monitors, rbdVol.RadosNamespace, cr)
 	if err != nil {
 		return status.Error(codes.Internal, err.Error())
 	}
@@ -747,8 +745,7 @@ func (cs *ControllerServer) DeleteVolume(
 	}
 	defer cs.OperationLocks.ReleaseDeleteLock(volumeID)

-	var rbdVol = &rbdVolume{}
-	rbdVol, err = genVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
+	rbdVol, err := genVolFromVolID(ctx, volumeID, cr, req.GetSecrets())
 	defer rbdVol.Destroy()
 	if err != nil {
 		if errors.Is(err, util.ErrPoolNotFound) {
@@ -871,9 +868,8 @@ func (cs *ControllerServer) CreateSnapshot(
 	}
 	defer cr.DeleteCredentials()

-	var rbdVol = &rbdVolume{}
 	// Fetch source volume information
-	rbdVol, err = genVolFromVolID(ctx, req.GetSourceVolumeId(), cr, req.GetSecrets())
+	rbdVol, err := genVolFromVolID(ctx, req.GetSourceVolumeId(), cr, req.GetSecrets())
 	defer rbdVol.Destroy()
 	if err != nil {
 		switch {
@@ -950,9 +946,8 @@ func (cs *ControllerServer) CreateSnapshot(
 	}()

 	var ready bool
-	var vol = new(rbdVolume)
-	ready, vol, err = cs.doSnapshotClone(ctx, rbdVol, rbdSnap, cr)
+	ready, vol, err := cs.doSnapshotClone(ctx, rbdVol, rbdSnap, cr)
 	if err != nil {
 		return nil, err
 	}
@@ -1145,9 +1140,8 @@ func (cs *ControllerServer) doSnapshotClone(
 		util.ErrorLog(ctx, "failed to get image id: %v", err)
 		return ready, cloneRbd, err
 	}
-	var j = &journal.Connection{}
 	// save image ID
-	j, err = snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
+	j, err := snapJournal.Connect(rbdSnap.Monitors, rbdSnap.RadosNamespace, cr)
 	if err != nil {
 		util.ErrorLog(ctx, "failed to connect to cluster: %v", err)
 		return ready, cloneRbd, err
@@ -1315,8 +1309,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
 	}
 	defer cr.DeleteCredentials()

-	var rbdVol = &rbdVolume{}
-	rbdVol, err = genVolFromVolID(ctx, volID, cr, req.GetSecrets())
+	rbdVol, err := genVolFromVolID(ctx, volID, cr, req.GetSecrets())
 	defer rbdVol.Destroy()
 	if err != nil {
 		switch {

View File

@@ -142,8 +142,10 @@ func (r *Driver) Run(conf *util.Config) {
 		// MULTI_NODE_SINGLE_WRITER etc, but need to do some verification of RO modes first
 		// will work those as follow up features
 		r.cd.AddVolumeCapabilityAccessModes(
-			[]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
-				csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER})
+			[]csi.VolumeCapability_AccessMode_Mode{
+				csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER,
+				csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER,
+			})
 	}

 	// Create GRPC servers

View File

@@ -342,7 +342,7 @@ func (ns *NodeServer) stageTransaction(
 	if !readOnly {
 		// #nosec - allow anyone to write inside the target path
-		err = os.Chmod(stagingTargetPath, 0777)
+		err = os.Chmod(stagingTargetPath, 0o777)
 	}

 	return transaction, err
 }
@@ -400,7 +400,7 @@ func (ns *NodeServer) undoStagingTransaction(
 func (ns *NodeServer) createStageMountPoint(ctx context.Context, mountPath string, isBlock bool) error {
 	if isBlock {
 		// #nosec:G304, intentionally creating file mountPath, not a security issue
-		pathFile, err := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0600)
+		pathFile, err := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0o600)
 		if err != nil {
 			util.ErrorLog(ctx, "failed to create mountPath:%s with error: %v", mountPath, err)
 			return status.Error(codes.Internal, err.Error())
@@ -413,7 +413,7 @@ func (ns *NodeServer) createStageMountPoint(ctx context.Context, mountPath strin
 		return nil
 	}

-	err := os.Mkdir(mountPath, 0750)
+	err := os.Mkdir(mountPath, 0o750)
 	if err != nil {
 		if !os.IsExist(err) {
 			util.ErrorLog(ctx, "failed to create mountPath:%s with error: %v", mountPath, err)
@@ -582,7 +582,7 @@ func (ns *NodeServer) createTargetMountPath(ctx context.Context, mountPath strin
 	}
 	if isBlock {
 		// #nosec
-		pathFile, e := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0750)
+		pathFile, e := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0o750)
 		if e != nil {
 			util.DebugLog(ctx, "Failed to create mountPath:%s with error: %v", mountPath, err)
 			return notMnt, status.Error(codes.Internal, e.Error())

View File

@@ -362,9 +362,7 @@ func (rv *rbdVolume) repairImageID(ctx context.Context, j *journal.Connection) e
 // reserveSnap is a helper routine to request a rbdSnapshot name reservation and generate the
 // volume ID for the generated name.
 func reserveSnap(ctx context.Context, rbdSnap *rbdSnapshot, rbdVol *rbdVolume, cr *util.Credentials) error {
-	var (
-		err error
-	)
+	var err error

 	journalPoolID, imagePoolID, err := util.GetPoolIDs(ctx, rbdSnap.Monitors, rbdSnap.JournalPool, rbdSnap.Pool, cr)
 	if err != nil {
@@ -435,9 +433,7 @@ func updateTopologyConstraints(rbdVol *rbdVolume, rbdSnap *rbdSnapshot) error {
 // reserveVol is a helper routine to request a rbdVolume name reservation and generate the
 // volume ID for the generated name.
 func reserveVol(ctx context.Context, rbdVol *rbdVolume, rbdSnap *rbdSnapshot, cr *util.Credentials) error {
-	var (
-		err error
-	)
+	var err error

 	err = updateTopologyConstraints(rbdVol, rbdSnap)
 	if err != nil {

View File

@@ -165,20 +165,18 @@ type imageFeature struct {
 	dependsOn  []string
 }

-var (
-	supportedFeatures = map[string]imageFeature{
-		librbd.FeatureNameLayering: {
-			needRbdNbd: false,
-		},
-		librbd.FeatureNameExclusiveLock: {
-			needRbdNbd: true,
-		},
-		librbd.FeatureNameJournaling: {
-			needRbdNbd: true,
-			dependsOn:  []string{librbd.FeatureNameExclusiveLock},
-		},
-	}
-)
+var supportedFeatures = map[string]imageFeature{
+	librbd.FeatureNameLayering: {
+		needRbdNbd: false,
+	},
+	librbd.FeatureNameExclusiveLock: {
+		needRbdNbd: true,
+	},
+	librbd.FeatureNameJournaling: {
+		needRbdNbd: true,
+		dependsOn:  []string{librbd.FeatureNameExclusiveLock},
+	},
+}

 // Connect an rbdVolume to the Ceph cluster.
 func (ri *rbdImage) Connect(cr *util.Credentials) error {
@@ -491,7 +489,6 @@ func addRbdManagerTask(ctx context.Context, pOpts *rbdVolume, arg []string) (boo
 		pOpts.Pool)
 	supported := true
 	_, stderr, err := util.ExecCommand(ctx, "ceph", args...)
-
 	if err != nil {
 		switch {
 		case strings.Contains(stderr, rbdTaskRemoveCmdInvalidString1) &&
@@ -547,7 +544,8 @@ func deleteImage(ctx context.Context, pOpts *rbdVolume, cr *util.Credentials) er
 	}

 	// attempt to use Ceph manager based deletion support if available
-	args := []string{"trash", "remove",
+	args := []string{
+		"trash", "remove",
 		pOpts.Pool + "/" + pOpts.ImageID,
 		"--id", cr.ID,
 		"--keyfile=" + cr.KeyFile,
@@ -1398,7 +1396,7 @@ func (ri *rbdImageMetadataStash) String() string {
 // stashRBDImageMetadata stashes required fields into the stashFileName at the passed in path, in
 // JSON format.
 func stashRBDImageMetadata(volOptions *rbdVolume, path string) error {
-	var imgMeta = rbdImageMetadataStash{
+	imgMeta := rbdImageMetadataStash{
 		// there are no checks for this at present
 		Version: 3, // nolint:gomnd // number specifies version.
 		Pool:    volOptions.Pool,
@@ -1419,7 +1417,7 @@ func stashRBDImageMetadata(volOptions *rbdVolume, path string) error {
 	}

 	fPath := filepath.Join(path, stashFileName)
-	err = ioutil.WriteFile(fPath, encodedBytes, 0600)
+	err = ioutil.WriteFile(fPath, encodedBytes, 0o600)
 	if err != nil {
 		return fmt.Errorf("failed to stash JSON image metadata for image (%s) at path (%s): %w", volOptions, fPath, err)
 	}

View File

@@ -165,7 +165,7 @@ func getSchedulingDetails(parameters map[string]string) (admin.Interval, admin.S
 // validateSchedulingInterval return the interval as it is if its ending with
 // `m|h|d` or else it will return error.
 func validateSchedulingInterval(interval string) (admin.Interval, error) {
-	var re = regexp.MustCompile(`^\d+[mhd]$`)
+	re := regexp.MustCompile(`^\d+[mhd]$`)
 	if re.MatchString(interval) {
 		return admin.Interval(interval), nil
 	}

View File

@@ -44,7 +44,7 @@ const (
 )

 func createCephConfigRoot() error {
-	return os.MkdirAll(cephConfigRoot, 0755) // #nosec
+	return os.MkdirAll(cephConfigRoot, 0o755) // #nosec
 }

 // WriteCephConfig writes out a basic ceph.conf file, making it easy to use
@@ -54,7 +54,7 @@ func WriteCephConfig() error {
 		return err
 	}

-	err := ioutil.WriteFile(CephConfigPath, cephConfig, 0600)
+	err := ioutil.WriteFile(CephConfigPath, cephConfig, 0o600)
 	if err != nil {
 		return err
 	}

View File

@@ -80,7 +80,7 @@ func TestConnPool(t *testing.T) {
 	// create a keyfile with some contents
 	keyfile := "/tmp/conn_utils.keyfile"
-	err := ioutil.WriteFile(keyfile, []byte("the-key"), 0600)
+	err := ioutil.WriteFile(keyfile, []byte("the-key"), 0o600)
 	if err != nil {
 		t.Errorf("failed to create keyfile: %v", err)
 		return

View File

@@ -18,13 +18,12 @@ package util

 import (
 	"context"
+	"crypto/rand"
 	"encoding/base64"
 	"errors"
 	"fmt"
 	"path"
 	"strings"
-
-	"crypto/rand"
 )

 const (

View File

@@ -22,11 +22,13 @@ import (
 	"testing"
 )

-var basePath = "./test_artifacts"
-var csiClusters = "csi-clusters.json"
-var pathToConfig = basePath + "/" + csiClusters
-var clusterID1 = "test1"
-var clusterID2 = "test2"
+var (
+	basePath     = "./test_artifacts"
+	csiClusters  = "csi-clusters.json"
+	pathToConfig = basePath + "/" + csiClusters
+	clusterID1   = "test1"
+	clusterID2   = "test2"
+)

 func cleanupTestData() {
 	os.RemoveAll(basePath)
@@ -41,7 +43,7 @@ func TestCSIConfig(t *testing.T) {
 	defer cleanupTestData()

-	err = os.MkdirAll(basePath, 0700)
+	err = os.MkdirAll(basePath, 0o700)
 	if err != nil {
 		t.Errorf("Test setup error %s", err)
 	}
@@ -53,7 +55,7 @@ func TestCSIConfig(t *testing.T) {
 	}

 	data = ""
-	err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0600)
+	err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0o600)
 	if err != nil {
 		t.Errorf("Test setup error %s", err)
 	}
@@ -65,7 +67,7 @@ func TestCSIConfig(t *testing.T) {
 	}

 	data = "[{\"clusterIDBad\":\"" + clusterID2 + "\",\"monitors\":[\"mon1\",\"mon2\",\"mon3\"]}]"
-	err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0600)
+	err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0o600)
 	if err != nil {
 		t.Errorf("Test setup error %s", err)
 	}
@@ -77,7 +79,7 @@ func TestCSIConfig(t *testing.T) {
 	}

 	data = "[{\"clusterID\":\"" + clusterID2 + "\",\"monitorsBad\":[\"mon1\",\"mon2\",\"mon3\"]}]"
-	err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0600)
+	err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0o600)
 	if err != nil {
 		t.Errorf("Test setup error %s", err)
 	}
@@ -89,7 +91,7 @@ func TestCSIConfig(t *testing.T) {
 	}

 	data = "[{\"clusterID\":\"" + clusterID2 + "\",\"monitors\":[\"mon1\",2,\"mon3\"]}]"
-	err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0600)
+	err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0o600)
 	if err != nil {
 		t.Errorf("Test setup error %s", err)
 	}
@@ -101,7 +103,7 @@ func TestCSIConfig(t *testing.T) {
 	}

 	data = "[{\"clusterID\":\"" + clusterID2 + "\",\"monitors\":[\"mon1\",\"mon2\",\"mon3\"]}]"
-	err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0600)
+	err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0o600)
 	if err != nil {
 		t.Errorf("Test setup error %s", err)
 	}
@@ -120,7 +122,7 @@ func TestCSIConfig(t *testing.T) {
 	data = "[{\"clusterID\":\"" + clusterID2 + "\",\"monitors\":[\"mon1\",\"mon2\",\"mon3\"]}," +
 		"{\"clusterID\":\"" + clusterID1 + "\",\"monitors\":[\"mon4\",\"mon5\",\"mon6\"]}]"
-	err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0600)
+	err = ioutil.WriteFile(basePath+"/"+csiClusters, []byte(data), 0o600)
 	if err != nil {
 		t.Errorf("Test setup error %s", err)
 	}

View File

@@ -26,6 +26,7 @@ func StartMetricsServer(c *Config) {
 		FatalLogMsg("failed to listen on address %v: %s", addr, err)
 	}
 }
+
 func addPath(name string, handler http.Handler) {
 	http.Handle(name, handler)
 	DebugLogMsg("DEBUG: registered profiling handler on /debug/pprof/%s\n", name)

View File

@@ -57,7 +57,6 @@ func TestOperationLocks(t *testing.T) {
 	volumeID := "test-vol"
 	lock := NewOperationLock()
 	err := lock.GetCloneLock(volumeID)
-
 	if err != nil {
 		t.Errorf("failed to acquire clone lock for %s %s", volumeID, err)
 	}

View File

@@ -31,7 +31,6 @@ func TestGetPIDLimit(t *testing.T) {
 	}

 	limit, err := GetPIDLimit()
-
 	if err != nil {
 		t.Errorf("no error should be returned, got: %v", err)
 	}

View File

@@ -133,9 +133,7 @@ type TopologyConstrainedPool struct {
 // from a CSI CreateVolume request.
 func GetTopologyFromRequest(
 	req *csi.CreateVolumeRequest) (*[]TopologyConstrainedPool, *csi.TopologyRequirement, error) {
-	var (
-		topologyPools []TopologyConstrainedPool
-	)
+	var topologyPools []TopologyConstrainedPool

 	// check if parameters have pool configuration pertaining to topology
 	topologyPoolsStr := req.GetParameters()["topologyConstrainedPools"]

View File

@@ -41,17 +41,17 @@ func checkAndReportError(t *testing.T, msg string, err error) {
 func TestFindPoolAndTopology(t *testing.T) {
 	t.Parallel()
 	var err error
-	var label1 = "region"
-	var label2 = "zone"
-	var l1Value1 = "R1"
-	var l1Value2 = "R2"
-	var l2Value1 = "Z1"
-	var l2Value2 = "Z2"
-	var pool1 = "PoolA"
-	var pool2 = "PoolB"
-	var topologyPrefix = "prefix"
-	var emptyTopoPools = []TopologyConstrainedPool{}
-	var emptyPoolNameTopoPools = []TopologyConstrainedPool{
+	label1 := "region"
+	label2 := "zone"
+	l1Value1 := "R1"
+	l1Value2 := "R2"
+	l2Value1 := "Z1"
+	l2Value2 := "Z2"
+	pool1 := "PoolA"
+	pool2 := "PoolB"
+	topologyPrefix := "prefix"
+	emptyTopoPools := []TopologyConstrainedPool{}
+	emptyPoolNameTopoPools := []TopologyConstrainedPool{
 		{
 			DomainSegments: []topologySegment{
 				{
@@ -65,12 +65,12 @@ func TestFindPoolAndTopology(t *testing.T) {
 			},
 		},
 	}
-	var emptyDomainsInTopoPools = []TopologyConstrainedPool{
+	emptyDomainsInTopoPools := []TopologyConstrainedPool{
 		{
 			PoolName: pool1,
 		},
 	}
-	var partialDomainsInTopoPools = []TopologyConstrainedPool{
+	partialDomainsInTopoPools := []TopologyConstrainedPool{
 		{
 			PoolName: pool1,
 			DomainSegments: []topologySegment{
@@ -81,7 +81,7 @@ func TestFindPoolAndTopology(t *testing.T) {
 			},
 		},
 	}
-	var differentDomainsInTopoPools = []TopologyConstrainedPool{
+	differentDomainsInTopoPools := []TopologyConstrainedPool{
 		{
 			PoolName: pool1,
 			DomainSegments: []topologySegment{
@@ -109,7 +109,7 @@ func TestFindPoolAndTopology(t *testing.T) {
 			},
 		},
 	}
-	var validSingletonTopoPools = []TopologyConstrainedPool{
+	validSingletonTopoPools := []TopologyConstrainedPool{
 		{
 			PoolName: pool1,
 			DomainSegments: []topologySegment{
@@ -124,7 +124,7 @@ func TestFindPoolAndTopology(t *testing.T) {
 			},
 		},
 	}
-	var validMultipleTopoPools = []TopologyConstrainedPool{
+	validMultipleTopoPools := []TopologyConstrainedPool{
 		{
 			PoolName: pool1,
 			DomainSegments: []topologySegment{
@@ -152,14 +152,14 @@ func TestFindPoolAndTopology(t *testing.T) {
 			},
 		},
 	}
-	var emptyAccReq = csi.TopologyRequirement{}
-	var emptySegmentAccReq = csi.TopologyRequirement{
+	emptyAccReq := csi.TopologyRequirement{}
+	emptySegmentAccReq := csi.TopologyRequirement{
 		Requisite: []*csi.Topology{
 			{},
 			{},
 		},
 	}
-	var partialHigherSegmentAccReq = csi.TopologyRequirement{
+	partialHigherSegmentAccReq := csi.TopologyRequirement{
 		Preferred: []*csi.Topology{
 			{
 				Segments: map[string]string{
@@ -168,7 +168,7 @@ func TestFindPoolAndTopology(t *testing.T) {
 			},
 		},
 	}
-	var partialLowerSegmentAccReq = csi.TopologyRequirement{
+	partialLowerSegmentAccReq := csi.TopologyRequirement{
 		Preferred: []*csi.Topology{
 			{
 				Segments: map[string]string{
@@ -177,7 +177,7 @@ func TestFindPoolAndTopology(t *testing.T) {
 			},
 		},
 	}
-	var differentSegmentAccReq = csi.TopologyRequirement{
+	differentSegmentAccReq := csi.TopologyRequirement{
 		Requisite: []*csi.Topology{
 			{
 				Segments: map[string]string{
@@ -193,7 +193,7 @@ func TestFindPoolAndTopology(t *testing.T) {
 			},
 		},
 	}
-	var validAccReq = csi.TopologyRequirement{
+	validAccReq := csi.TopologyRequirement{
 		Requisite: []*csi.Topology{
 			{
 				Segments: map[string]string{

View File

@@ -273,7 +273,7 @@ func GenerateVolID(
 // CreateMountPoint creates the directory with given path.
 func CreateMountPoint(mountPath string) error {
-	return os.MkdirAll(mountPath, 0750)
+	return os.MkdirAll(mountPath, 0o750)
 }

 // checkDirExists checks directory exists or not.

View File

@@ -256,10 +256,21 @@ func TestParseKernelRelease(t *testing.T) {
 		}
 	}

-	goodReleases := []string{"5.12", "5.12xlinux", "5.1-2-yam", "3.1-5-x", "5.12.14", "5.12.14xlinux",
-		"5.12.14-xlinux", "5.12.14-99-x", "3.3x-3"}
-	goodVersions := [][]int{{5, 12, 0, 0}, {5, 12, 0, 0}, {5, 1, 0, 2}, {3, 1, 0, 5},
-		{5, 12, 14, 0}, {5, 12, 14, 0}, {5, 12, 14, 0}, {5, 12, 14, 99}, {3, 3, 0, 0}}
+	goodReleases := []string{
+		"5.12", "5.12xlinux", "5.1-2-yam", "3.1-5-x", "5.12.14", "5.12.14xlinux",
+		"5.12.14-xlinux", "5.12.14-99-x", "3.3x-3",
+	}
+	goodVersions := [][]int{
+		{5, 12, 0, 0},
+		{5, 12, 0, 0},
+		{5, 1, 0, 2},
+		{3, 1, 0, 5},
+		{5, 12, 14, 0},
+		{5, 12, 14, 0},
+		{5, 12, 14, 0},
+		{5, 12, 14, 99},
+		{3, 3, 0, 0},
+	}
 	for i, release := range goodReleases {
 		version, patchlevel, sublevel, extraversion, err := parseKernelRelease(release)
 		if err != nil {

View File

@@ -90,8 +90,10 @@ func (ci CSIIdentifier) ComposeCSIID() (string, error) {
 	binary.BigEndian.PutUint64(buf64, uint64(ci.LocationID))
 	poolIDEncodedHex := hex.EncodeToString(buf64)

-	return strings.Join([]string{versionEncodedHex, clusterIDLength, ci.ClusterID,
-		poolIDEncodedHex, ci.ObjectUUID}, "-"), nil
+	return strings.Join([]string{
+		versionEncodedHex, clusterIDLength, ci.ClusterID,
+		poolIDEncodedHex, ci.ObjectUUID,
+	}, "-"), nil
 }

 /*