Mirror of https://github.com/ceph/ceph-csi.git
cleanup: refactor deeply nested if statements in internal/rbd
Refactored deeply nested if statements in internal/rbd to reduce cognitive complexity.

Signed-off-by: Rakshith R <rar@redhat.com>
parent d4cfd7bef9
commit 020cded581
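
Every hunk below applies the same transformation: a deeply nested if / else-if chain is flattened into a tagless switch, a combined condition, or early returns, so each branch sits at one indentation level. A minimal standalone sketch of that before/after shape, using hypothetical names that are not taken from this repository:

package main

import "fmt"

// classifyNested is the "before" shape: each condition adds a nesting level.
func classifyNested(n int) string {
    if n != 0 {
        if n < 0 {
            return "negative"
        } else if n%2 == 0 {
            return "positive even"
        }
        return "positive odd"
    }
    return "zero"
}

// classifySwitch is the "after" shape: a tagless switch keeps all branches flat.
func classifySwitch(n int) string {
    switch {
    case n == 0:
        return "zero"
    case n < 0:
        return "negative"
    case n%2 == 0:
        return "positive even"
    default:
        return "positive odd"
    }
}

func main() {
    for _, n := range []int{0, -3, 4, 7} {
        fmt.Println(n, classifyNested(n), classifySwitch(n))
    }
}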
@@ -74,7 +74,8 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume)
     err = tempClone.checkSnapExists(snap)
     if err != nil {
-        if errors.Is(err, ErrSnapNotFound) {
+        switch {
+        case errors.Is(err, ErrSnapNotFound):
             // check temporary image needs flatten, if yes add task to flatten the
             // temporary clone
             err = tempClone.flattenRbdImage(ctx, rv.conn.Creds, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
@@ -88,7 +89,7 @@ func (rv *rbdVolume) checkCloneImage(ctx context.Context, parentVol *rbdVolume)
                 return false, err
             }
             return true, nil
-        } else if !errors.Is(err, ErrImageNotFound) {
+        case !errors.Is(err, ErrImageNotFound):
             // any error other than image not found return error
             return false, err
         }
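
A standalone sketch of the error-classification shape used in checkCloneImage above, assuming two hypothetical sentinel errors in place of ErrSnapNotFound and ErrImageNotFound:

package main

import (
    "errors"
    "fmt"
)

// Hypothetical sentinels standing in for ErrSnapNotFound / ErrImageNotFound.
var (
    errSnapNotFound  = errors.New("snapshot not found")
    errImageNotFound = errors.New("image not found")
)

// handleLookup classifies a lookup error with a tagless switch instead of an
// if / else-if chain, mirroring the shape of the checkCloneImage change.
func handleLookup(err error) (bool, error) {
    if err != nil {
        switch {
        case errors.Is(err, errSnapNotFound):
            // recoverable: a caller would re-create the snapshot here
            return true, nil
        case !errors.Is(err, errImageNotFound):
            // any error other than "image not found" is fatal
            return false, err
        }
    }
    return false, nil
}

func main() {
    fmt.Println(handleLookup(nil))
    fmt.Println(handleLookup(fmt.Errorf("wrapped: %w", errSnapNotFound)))
    fmt.Println(handleLookup(errors.New("connection refused")))
}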
@@ -809,7 +809,8 @@ func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
         defer vol.Destroy()

         err = vol.flattenRbdImage(ctx, cr, false, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
-        if errors.Is(err, ErrFlattenInProgress) {
+        switch {
+        case errors.Is(err, ErrFlattenInProgress):
             return &csi.CreateSnapshotResponse{
                 Snapshot: &csi.Snapshot{
                     SizeBytes: rbdSnap.SizeBytes,
@@ -819,24 +820,23 @@ func (cs *ControllerServer) CreateSnapshot(ctx context.Context, req *csi.CreateS
                     ReadyToUse: false,
                 },
             }, nil
-        }
-        if err != nil {
+        case err != nil:
             uErr := undoSnapshotCloning(ctx, vol, rbdSnap, vol, cr)
             if uErr != nil {
                 util.WarningLog(ctx, "failed undoing reservation of snapshot: %s %v", req.GetName(), uErr)
             }
             return nil, status.Errorf(codes.Internal, err.Error())
-        }
-
-        return &csi.CreateSnapshotResponse{
-            Snapshot: &csi.Snapshot{
-                SizeBytes: rbdSnap.SizeBytes,
-                SnapshotId: rbdSnap.VolID,
-                SourceVolumeId: rbdSnap.SourceVolumeID,
-                CreationTime: rbdSnap.CreatedAt,
-                ReadyToUse: true,
-            },
-        }, nil
+        default:
+            return &csi.CreateSnapshotResponse{
+                Snapshot: &csi.Snapshot{
+                    SizeBytes: rbdSnap.SizeBytes,
+                    SnapshotId: rbdSnap.VolID,
+                    SourceVolumeId: rbdSnap.SourceVolumeID,
+                    CreationTime: rbdSnap.CreatedAt,
+                    ReadyToUse: true,
+                },
+            }, nil
+        }
     }

     err = flattenTemporaryClonedImages(ctx, rbdVol, cr)
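
The CreateSnapshot hunk folds the in-progress, failure, and success outcomes into a single switch whose default case is the happy path. A hedged sketch of that three-way shape, with made-up types and a stand-in for ErrFlattenInProgress:

package main

import (
    "errors"
    "fmt"
)

var errInProgress = errors.New("operation in progress") // stand-in for ErrFlattenInProgress

type result struct {
    ready bool
}

// finish returns a not-yet-ready result while work is in progress, an error on
// failure, and a ready result otherwise; the default case is the success path.
func finish(err error) (*result, error) {
    switch {
    case errors.Is(err, errInProgress):
        return &result{ready: false}, nil
    case err != nil:
        return nil, fmt.Errorf("finishing failed: %w", err)
    default:
        return &result{ready: true}, nil
    }
}

func main() {
    fmt.Println(finish(nil))
    fmt.Println(finish(errInProgress))
    fmt.Println(finish(errors.New("boom")))
}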
@@ -267,18 +267,16 @@ func (ns *NodeServer) stageTransaction(ctx context.Context, req *csi.NodeStageVo
             return transaction, err
         }
     }
-    if !util.CheckKernelSupport(kernelRelease, deepFlattenSupport) {
-        if !skipForceFlatten {
-            feature, err = volOptions.checkImageChainHasFeature(ctx, librbd.FeatureDeepFlatten)
-            if err != nil {
-                return transaction, err
-            }
-            if feature {
-                err = volOptions.flattenRbdImage(ctx, cr, true, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
-                if err != nil {
-                    return transaction, err
-                }
-            }
-        }
-    }
+    if !util.CheckKernelSupport(kernelRelease, deepFlattenSupport) && !skipForceFlatten {
+        feature, err = volOptions.checkImageChainHasFeature(ctx, librbd.FeatureDeepFlatten)
+        if err != nil {
+            return transaction, err
+        }
+        if feature {
+            err = volOptions.flattenRbdImage(ctx, cr, true, rbdHardMaxCloneDepth, rbdSoftMaxCloneDepth)
+            if err != nil {
+                return transaction, err
+            }
+        }
+    }
     // Mapping RBD image
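
The stageTransaction hunk removes one nesting level by joining the two outer guards with &&. A small sketch of that transformation with invented condition names (the real code checks util.CheckKernelSupport and a skipForceFlatten flag):

package main

import "fmt"

// flattenNeeded shows the combined-guard form: the two outer conditions that
// used to be nested ifs are joined with &&, so the body sits one level
// shallower. All three parameters are invented stand-ins.
func flattenNeeded(kernelSupportsDeepFlatten, skipForceFlatten, imageHasFeature bool) bool {
    if !kernelSupportsDeepFlatten && !skipForceFlatten {
        if imageHasFeature {
            return true
        }
    }
    return false
}

func main() {
    fmt.Println(flattenNeeded(false, false, true)) // true: flatten required
    fmt.Println(flattenNeeded(true, false, true))  // false: kernel handles it
}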
@@ -468,9 +466,10 @@ func (ns *NodeServer) mountVolumeToStagePath(ctx context.Context, req *csi.NodeS

     if existingFormat == "" && !staticVol && !readOnly {
         args := []string{}
-        if fsType == "ext4" {
+        switch fsType {
+        case "ext4":
             args = []string{"-m0", "-Enodiscard,lazy_itable_init=1,lazy_journal_init=1", devicePath}
-        } else if fsType == "xfs" {
+        case "xfs":
             args = []string{"-K", devicePath}
             // always disable reflink
             // TODO: make enabling an option, see ceph/ceph-csi#1256
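
mountVolumeToStagePath now picks mkfs arguments with a switch on the filesystem type instead of an if / else-if chain. A standalone sketch of that selection; the flag lists are copied from the hunk, while the devicePath values below are only examples:

package main

import "fmt"

// mkfsArgs selects mkfs options per filesystem with a value switch rather than
// an if / else-if chain. The flags mirror the hunk above.
func mkfsArgs(fsType, devicePath string) []string {
    args := []string{}
    switch fsType {
    case "ext4":
        args = []string{"-m0", "-Enodiscard,lazy_itable_init=1,lazy_journal_init=1", devicePath}
    case "xfs":
        // reflink stays disabled here, matching the TODO in the hunk
        args = []string{"-K", devicePath}
    }
    return args
}

func main() {
    fmt.Println(mkfsArgs("ext4", "/dev/rbd0"))
    fmt.Println(mkfsArgs("xfs", "/dev/rbd0"))
}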
@@ -531,30 +530,30 @@ func (ns *NodeServer) mountVolume(ctx context.Context, stagingPath string, req *
 func (ns *NodeServer) createTargetMountPath(ctx context.Context, mountPath string, isBlock bool) (bool, error) {
     // Check if that mount path exists properly
     notMnt, err := mount.IsNotMountPoint(ns.mounter, mountPath)
-    if err != nil {
-        if os.IsNotExist(err) {
-            if isBlock {
-                // #nosec
-                pathFile, e := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0750)
-                if e != nil {
-                    util.DebugLog(ctx, "Failed to create mountPath:%s with error: %v", mountPath, err)
-                    return notMnt, status.Error(codes.Internal, e.Error())
-                }
-                if err = pathFile.Close(); err != nil {
-                    util.DebugLog(ctx, "Failed to close mountPath:%s with error: %v", mountPath, err)
-                    return notMnt, status.Error(codes.Internal, err.Error())
-                }
-            } else {
-                // Create a directory
-                if err = util.CreateMountPoint(mountPath); err != nil {
-                    return notMnt, status.Error(codes.Internal, err.Error())
-                }
-            }
-            notMnt = true
-        } else {
-            return false, status.Error(codes.Internal, err.Error())
-        }
-    }
+    if err == nil {
+        return notMnt, err
+    }
+    if !os.IsNotExist(err) {
+        return false, status.Error(codes.Internal, err.Error())
+    }
+    if isBlock {
+        // #nosec
+        pathFile, e := os.OpenFile(mountPath, os.O_CREATE|os.O_RDWR, 0750)
+        if e != nil {
+            util.DebugLog(ctx, "Failed to create mountPath:%s with error: %v", mountPath, err)
+            return notMnt, status.Error(codes.Internal, e.Error())
+        }
+        if err = pathFile.Close(); err != nil {
+            util.DebugLog(ctx, "Failed to close mountPath:%s with error: %v", mountPath, err)
+            return notMnt, status.Error(codes.Internal, err.Error())
+        }
+    } else {
+        // Create a mountpath directory
+        if err = util.CreateMountPoint(mountPath); err != nil {
+            return notMnt, status.Error(codes.Internal, err.Error())
+        }
+    }
+    notMnt = true
+
     return notMnt, err
 }
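
createTargetMountPath is rewritten with guard clauses: the err == nil and unexpected-error cases return immediately, so the create-path logic no longer sits inside if err != nil { if os.IsNotExist(err) { ... } }. A hedged sketch of the same guard-clause shape, built around os.Stat and os.MkdirAll rather than the driver's own helpers:

package main

import (
    "fmt"
    "os"
)

// ensurePath reports whether path already existed, creating it as a directory
// when it does not. Guard clauses handle the error cases up front, so the
// creation logic stays at the top indentation level.
func ensurePath(path string) (bool, error) {
    _, err := os.Stat(path)
    if err == nil {
        return true, nil
    }
    if !os.IsNotExist(err) {
        return false, fmt.Errorf("stat %s: %w", path, err)
    }
    if err := os.MkdirAll(path, 0o750); err != nil {
        return false, fmt.Errorf("create %s: %w", path, err)
    }
    return false, nil
}

func main() {
    existed, err := ensurePath("/tmp/ceph-csi-example")
    fmt.Println(existed, err)
}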
@@ -277,10 +277,10 @@ func (rv *rbdVolume) Exists(ctx context.Context, parentVol *rbdVolume) (bool, er
     // Need to check cloned info here not on createvolume,
     if parentVol != nil {
         found, cErr := rv.checkCloneImage(ctx, parentVol)
-        if found && cErr == nil {
+        switch {
+        case found && cErr == nil:
             return true, nil
-        }
-        if cErr != nil {
+        case cErr != nil:
             return false, cErr
         }
     }
@@ -605,38 +605,40 @@ func (rv *rbdVolume) flattenRbdImage(ctx context.Context, cr *util.Credentials,
         util.ExtendedLog(ctx, "clone depth is (%d), configured softlimit (%d) and hardlimit (%d) for %s", depth, softlimit, hardlimit, rv)
     }

-    if forceFlatten || (depth >= hardlimit) || (depth >= softlimit) {
-        args := []string{"flatten", rv.String(), "--id", cr.ID, "--keyfile=" + cr.KeyFile, "-m", rv.Monitors}
-        supported, err := addRbdManagerTask(ctx, rv, args)
-        if supported {
-            if err != nil {
-                // discard flattening error if the image does not have any parent
-                rbdFlattenNoParent := fmt.Sprintf("Image %s/%s does not have a parent", rv.Pool, rv.RbdImageName)
-                if strings.Contains(err.Error(), rbdFlattenNoParent) {
-                    return nil
-                }
-                util.ErrorLog(ctx, "failed to add task flatten for %s : %v", rv, err)
-                return err
-            }
-            if forceFlatten || depth >= hardlimit {
-                return fmt.Errorf("%w: flatten is in progress for image %s", ErrFlattenInProgress, rv.RbdImageName)
-            }
-        }
-        if !supported {
-            util.ErrorLog(ctx, "task manager does not support flatten,image will be flattened once hardlimit is reached: %v", err)
-            if forceFlatten || depth >= hardlimit {
-                err = rv.Connect(cr)
-                if err != nil {
-                    return err
-                }
-                err := rv.flatten()
-                if err != nil {
-                    util.ErrorLog(ctx, "rbd failed to flatten image %s %s: %v", rv.Pool, rv.RbdImageName, err)
-                    return err
-                }
-            }
-        }
-    }
+    if !forceFlatten && (depth < hardlimit) && (depth < softlimit) {
+        return nil
+    }
+    args := []string{"flatten", rv.String(), "--id", cr.ID, "--keyfile=" + cr.KeyFile, "-m", rv.Monitors}
+    supported, err := addRbdManagerTask(ctx, rv, args)
+    if supported {
+        if err != nil {
+            // discard flattening error if the image does not have any parent
+            rbdFlattenNoParent := fmt.Sprintf("Image %s/%s does not have a parent", rv.Pool, rv.RbdImageName)
+            if strings.Contains(err.Error(), rbdFlattenNoParent) {
+                return nil
+            }
+            util.ErrorLog(ctx, "failed to add task flatten for %s : %v", rv, err)
+            return err
+        }
+        if forceFlatten || depth >= hardlimit {
+            return fmt.Errorf("%w: flatten is in progress for image %s", ErrFlattenInProgress, rv.RbdImageName)
+        }
+    }
+    if !supported {
+        util.ErrorLog(ctx, "task manager does not support flatten,image will be flattened once hardlimit is reached: %v", err)
+        if forceFlatten || depth >= hardlimit {
+            err = rv.Connect(cr)
+            if err != nil {
+                return err
+            }
+            err := rv.flatten()
+            if err != nil {
+                util.ErrorLog(ctx, "rbd failed to flatten image %s %s: %v", rv.Pool, rv.RbdImageName, err)
+                return err
+            }
+        }
+    }

     return nil
 }
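
flattenRbdImage drops its outer wrapper by negating the trigger condition (De Morgan) and returning early when no flattening is needed. A small sketch of that inversion with invented depth and limit parameters:

package main

import "fmt"

// needsFlatten returns early when nothing has to be done; the negation of
// (force || depth >= hard || depth >= soft) is written out via De Morgan so
// the caller's remaining work no longer sits inside a wrapper if-block.
func needsFlatten(force bool, depth, hard, soft uint) bool {
    if !force && (depth < hard) && (depth < soft) {
        return false
    }
    return true
}

func main() {
    fmt.Println(needsFlatten(false, 2, 8, 4)) // false: under both limits
    fmt.Println(needsFlatten(false, 5, 8, 4)) // true: soft limit reached
    fmt.Println(needsFlatten(true, 0, 8, 4))  // true: forced
}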