Fix issues found in gometalinter
Signed-off-by: Madhu Rajanna <mrajanna@redhat.com>
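The recurring change below replaces bare `defer x.UnlockKey(key)` statements, whose returned error gometalinter's errcheck reports as dropped, with a deferred closure that checks and logs the error. A minimal standalone sketch of the pattern, not part of the commit itself; the `unlockKey` helper is hypothetical, standing in for the key-mutex the driver uses:

    package main

    import "log"

    // unlockKey stands in for any cleanup call that returns an error, such as
    // the key-mutex UnlockKey calls in this commit (the helper is illustrative).
    func unlockKey(key string) error { return nil }

    // before: errcheck flags this, because the error returned by unlockKey
    // is silently discarded when the deferred call runs.
    func before(key string) {
        defer unlockKey(key)
    }

    // after: the pattern applied throughout the commit; the deferred closure
    // checks the error and logs it instead of dropping it.
    func after(key string) {
        defer func() {
            if err := unlockKey(key); err != nil {
                log.Printf("failed to unlock mutex key:%s %v", key, err)
            }
        }()
    }

    func main() {
        before("csi-rbd-vol-example")
        after("csi-rbd-vol-example")
    }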
@@ -55,16 +55,19 @@ var (
+// info from metadata store
 func (cs *ControllerServer) LoadExDataFromMetadataStore() error {
     vol := &rbdVolume{}
+    // nolint: errcheck
     cs.MetadataStore.ForAll("csi-rbd-vol-", vol, func(identifier string) error {
         rbdVolumes[identifier] = vol
         return nil
     })

     snap := &rbdSnapshot{}
+    // nolint: errcheck
     cs.MetadataStore.ForAll("csi-rbd-(.*)-snap-", snap, func(identifier string) error {
         rbdSnapshots[identifier] = snap
         return nil
     })

     glog.Infof("Loaded %d volumes and %d snapshots from metadata store", len(rbdVolumes), len(rbdSnapshots))
     return nil
 }
@@ -91,7 +94,11 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
         return nil, err
     }
     volumeNameMutex.LockKey(req.GetName())
-    defer volumeNameMutex.UnlockKey(req.GetName())
+    defer func() {
+        if err := volumeNameMutex.UnlockKey(req.GetName()); err != nil {
+            glog.Warningf("failed to unlock mutex volume:%s %v", req.GetName(), err)
+        }
+    }()

     // Need to check for already existing volume name, and if found
     // check for the requested capacity and already allocated capacity
@@ -208,7 +215,13 @@ func (cs *ControllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
     // For now the image get unconditionally deleted, but here retention policy can be checked
     volumeID := req.GetVolumeId()
     volumeIDMutex.LockKey(volumeID)
-    defer volumeIDMutex.UnlockKey(volumeID)
+
+    defer func() {
+        if err := volumeIDMutex.UnlockKey(volumeID); err != nil {
+            glog.Warningf("failed to unlock mutex volume:%s %v", volumeID, err)
+        }
+    }()
+
     rbdVol := &rbdVolume{}
     if err := cs.MetadataStore.Get(volumeID, rbdVol); err != nil {
         if os.IsNotExist(errors.Cause(err)) {
@@ -276,7 +289,12 @@ func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
     }

     snapshotNameMutex.LockKey(req.GetName())
-    defer snapshotNameMutex.UnlockKey(req.GetName())
+
+    defer func() {
+        if err := snapshotNameMutex.UnlockKey(req.GetName()); err != nil {
+            glog.Warningf("failed to unlock mutex snapshot:%s %v", req.GetName(), err)
+        }
+    }()

     // Need to check for already existing snapshot name, and if found
     // check for the requested source volume id and already allocated source volume id
@@ -397,7 +415,12 @@ func (cs *ControllerServer) DeleteSnapshot(ctx context.Context, req *csi.DeleteS
         return nil, status.Error(codes.InvalidArgument, "Snapshot ID cannot be empty")
     }
     snapshotIDMutex.LockKey(snapshotID)
-    defer snapshotIDMutex.UnlockKey(snapshotID)
+
+    defer func() {
+        if err := snapshotIDMutex.UnlockKey(snapshotID); err != nil {
+            glog.Warningf("failed to unlock mutex snapshot:%s %v", snapshotID, err)
+        }
+    }()
+
     rbdSnap := &rbdSnapshot{}
     if err := cs.MetadataStore.Get(snapshotID, rbdSnap); err != nil {
@@ -60,7 +60,12 @@ func (ns *NodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstag
 func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
     targetPath := req.GetTargetPath()
     targetPathMutex.LockKey(targetPath)
-    defer targetPathMutex.UnlockKey(targetPath)
+
+    defer func() {
+        if err := targetPathMutex.UnlockKey(targetPath); err != nil {
+            glog.Warningf("failed to unlock mutex targetpath:%s %v", targetPath, err)
+        }
+    }()

     var volName string
     isBlock := req.GetVolumeCapability().GetBlock() != nil
@@ -84,12 +89,12 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
     if os.IsNotExist(err) {
         if isBlock {
             // create an empty file
-            targetPathFile, err := os.OpenFile(targetPath, os.O_CREATE|os.O_RDWR, 0750)
-            if err != nil {
+            targetPathFile, e := os.OpenFile(targetPath, os.O_CREATE|os.O_RDWR, 0750)
+            if e != nil {
                 glog.V(4).Infof("Failed to create targetPath:%s with error: %v", targetPath, err)
-                return nil, status.Error(codes.Internal, err.Error())
+                return nil, status.Error(codes.Internal, e.Error())
             }
-            if err := targetPathFile.Close(); err != nil {
+            if err = targetPathFile.Close(); err != nil {
                 glog.V(4).Infof("Failed to close targetPath:%s with error: %v", targetPath, err)
                 return nil, status.Error(codes.Internal, err.Error())
             }
@@ -153,7 +158,12 @@ func (ns *NodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
 func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
     targetPath := req.GetTargetPath()
     targetPathMutex.LockKey(targetPath)
-    defer targetPathMutex.UnlockKey(targetPath)
+
+    defer func() {
+        if err := targetPathMutex.UnlockKey(targetPath); err != nil {
+            glog.Warningf("failed to unlock mutex targetpath:%s %v", targetPath, err)
+        }
+    }()

     notMnt, err := ns.mounter.IsNotMountPoint(targetPath)
     if err != nil {
@@ -177,7 +187,6 @@ func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu

     // Bind mounted device needs to be resolved by using resolveBindMountedBlockDevice
     if devicePath == "devtmpfs" {
-        var err error
         devicePath, err = resolveBindMountedBlockDevice(targetPath)
         if err != nil {
             return nil, status.Error(codes.Internal, err.Error())
@@ -206,13 +215,13 @@ func (ns *NodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
     }

     // Unmapping rbd device
-    if err := detachRBDDevice(devicePath); err != nil {
+    if err = detachRBDDevice(devicePath); err != nil {
         glog.V(3).Infof("failed to unmap rbd device: %s with error: %v", devicePath, err)
         return nil, err
     }

     // Remove targetPath
-    if err := os.RemoveAll(targetPath); err != nil {
+    if err = os.RemoveAll(targetPath); err != nil {
         glog.V(3).Infof("failed to remove targetPath: %s with error: %v", targetPath, err)
         return nil, err
     }
@@ -240,7 +249,7 @@ func parseFindMntResolveSource(out string) (string, error) {
         return match[1], nil
     }
     // Check if out is a block device
-    reBlk := regexp.MustCompile("^devtmpfs\\[(/[^/]+(?:/[^/]*)*)\\]$")
+    reBlk := regexp.MustCompile(`^devtmpfs\[(/[^/]+(?:/[^/]*)*)\]$`)
     if match := reBlk.FindStringSubmatch(out); match != nil {
         return fmt.Sprintf("/dev%s", match[1]), nil
     }
@@ -112,7 +112,10 @@ func (r *Driver) Run(driverName, nodeID, endpoint string, containerized bool, ca
     }

     r.cs = NewControllerServer(r.cd, cachePersister)
-    r.cs.LoadExDataFromMetadataStore()
+
+    if err = r.cs.LoadExDataFromMetadataStore(); err != nil {
+        glog.Fatalf("failed to load metadata from store, err %v\n", err)
+    }

     s := csicommon.NewNonBlockingGRPCServer()
     s.Start(endpoint, r.ids, r.cs, r.ns)
@@ -229,7 +229,12 @@ func attachRBDImage(volOptions *rbdVolume, userID string, credentials map[string
     devicePath, found := waitForPath(volOptions.Pool, image, 1, useNBD)
     if !found {
         attachdetachMutex.LockKey(imagePath)
-        defer attachdetachMutex.UnlockKey(imagePath)
+
+        defer func() {
+            if err = attachdetachMutex.UnlockKey(imagePath); err != nil {
+                glog.Warningf("failed to unlock mutex imagepath:%s %v", imagePath, err)
+            }
+        }()

         _, err = execCommand("modprobe", []string{moduleName})
         if err != nil {