diff --git a/internal/cephfs/clone.go b/internal/cephfs/clone.go
index 511482441..65138d8e5 100644
--- a/internal/cephfs/clone.go
+++ b/internal/cephfs/clone.go
@@ -100,7 +100,7 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
 		return cloneFailedErr
 	case cephFSCloneComplete:
 		// This is a work around to fix sizing issue for cloned images
-		err = resizeVolume(ctx, volOpt, cr, cloneID, volOpt.Size)
+		err = volOpt.resizeVolume(ctx, cr, cloneID, volOpt.Size)
 		if err != nil {
 			util.ErrorLog(ctx, "failed to expand volume %s: %v", cloneID, err)
 			return err
@@ -179,7 +179,7 @@ func createCloneFromSnapshot(ctx context.Context, parentVolOpt, volOptions *volu
 	// The clonedvolume currently does not reflect the proper size due to an issue in cephfs
 	// however this is getting addressed in cephfs and the parentvolume size will be reflected
 	// in the new cloned volume too. Till then we are explicitly making the size set
-	err = resizeVolume(ctx, volOptions, cr, volumeID(vID.FsSubvolName), volOptions.Size)
+	err = volOptions.resizeVolume(ctx, cr, volumeID(vID.FsSubvolName), volOptions.Size)
 	if err != nil {
 		util.ErrorLog(ctx, "failed to expand volume %s with error: %v", vID.FsSubvolName, err)
 		return err
diff --git a/internal/cephfs/controllerserver.go b/internal/cephfs/controllerserver.go
index 7313abe96..40271f048 100644
--- a/internal/cephfs/controllerserver.go
+++ b/internal/cephfs/controllerserver.go
@@ -186,7 +186,7 @@ func (cs *ControllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
 		// it will be fixed in cephfs soon with the parentvolume size. Till then by below
 		// resize we are making sure we return or satisfy the requested size by setting the size
 		// explictly
-		err = resizeVolume(ctx, volOptions, cr, volumeID(vID.FsSubvolName), volOptions.Size)
+		err = volOptions.resizeVolume(ctx, cr, volumeID(vID.FsSubvolName), volOptions.Size)
 		if err != nil {
 			purgeErr := purgeVolume(ctx, volumeID(vID.FsSubvolName), cr, volOptions, false)
 			if purgeErr != nil {
@@ -426,7 +426,7 @@ func (cs *ControllerServer) ControllerExpandVolume(ctx context.Context, req *csi
 
 	RoundOffSize := util.RoundOffBytes(req.GetCapacityRange().GetRequiredBytes())
 
-	if err = resizeVolume(ctx, volOptions, cr, volumeID(volIdentifier.FsSubvolName), RoundOffSize); err != nil {
+	if err = volOptions.resizeVolume(ctx, cr, volumeID(volIdentifier.FsSubvolName), RoundOffSize); err != nil {
 		util.ErrorLog(ctx, "failed to expand volume %s: %v", volumeID(volIdentifier.FsSubvolName), err)
 		return nil, status.Error(codes.Internal, err.Error())
 	}
diff --git a/internal/cephfs/volume.go b/internal/cephfs/volume.go
index 2d702f5ea..d81a12595 100644
--- a/internal/cephfs/volume.go
+++ b/internal/cephfs/volume.go
@@ -177,27 +177,27 @@ func createVolume(ctx context.Context, volOptions *volumeOptions, volID volumeID
 // resizeVolume will try to use ceph fs subvolume resize command to resize the
 // subvolume. If the command is not available as a fallback it will use
 // CreateVolume to resize the subvolume.
-func resizeVolume(ctx context.Context, volOptions *volumeOptions, cr *util.Credentials, volID volumeID, bytesQuota int64) error {
+func (vo *volumeOptions) resizeVolume(ctx context.Context, cr *util.Credentials, volID volumeID, bytesQuota int64) error {
 	// keyPresent checks whether corresponding clusterID key is present in clusterAdditionalInfo
 	var keyPresent bool
 	// verify if corresponding ClusterID key is present in the map,
 	// and if not, initialize with default values(false).
-	if _, keyPresent = clusterAdditionalInfo[volOptions.ClusterID]; !keyPresent {
-		clusterAdditionalInfo[volOptions.ClusterID] = &localClusterState{}
+	if _, keyPresent = clusterAdditionalInfo[vo.ClusterID]; !keyPresent {
+		clusterAdditionalInfo[vo.ClusterID] = &localClusterState{}
 	}
 	// resize subvolume when either it's supported, or when corresponding
 	// clusterID key was not present.
-	if clusterAdditionalInfo[volOptions.ClusterID].resizeSupported || !keyPresent {
+	if clusterAdditionalInfo[vo.ClusterID].resizeSupported || !keyPresent {
 		args := []string{
 			"fs",
 			"subvolume",
 			"resize",
-			volOptions.FsName,
+			vo.FsName,
 			string(volID),
 			strconv.FormatInt(bytesQuota, 10),
 			"--group_name",
-			volOptions.SubvolumeGroup,
-			"-m", volOptions.Monitors,
+			vo.SubvolumeGroup,
+			"-m", vo.Monitors,
 			"-c", util.CephConfigPath,
 			"-n", cephEntityClientPrefix + cr.ID,
 			"--keyfile=" + cr.KeyFile,
@@ -209,17 +209,17 @@ func resizeVolume(ctx context.Context, volOptions *volumeOptions, cr *util.Crede
 			args[:]...)
 
 		if err == nil {
-			clusterAdditionalInfo[volOptions.ClusterID].resizeSupported = true
+			clusterAdditionalInfo[vo.ClusterID].resizeSupported = true
 			return nil
 		}
 		// Incase the error is other than invalid command return error to the caller.
 		if !strings.Contains(err.Error(), invalidCommand) {
-			util.ErrorLog(ctx, "failed to resize subvolume %s in fs %s: %s", string(volID), volOptions.FsName, err)
+			util.ErrorLog(ctx, "failed to resize subvolume %s in fs %s: %s", string(volID), vo.FsName, err)
 			return err
 		}
 	}
-	clusterAdditionalInfo[volOptions.ClusterID].resizeSupported = false
-	return createVolume(ctx, volOptions, volID, bytesQuota)
+	clusterAdditionalInfo[vo.ClusterID].resizeSupported = false
+	return createVolume(ctx, vo, volID, bytesQuota)
 }
 
 func purgeVolume(ctx context.Context, volID volumeID, cr *util.Credentials, volOptions *volumeOptions, force bool) error {
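The pattern applied across all four hunks is the same: resizeVolume changes from a free function that takes a *volumeOptions argument into a method with a *volumeOptions receiver, so per-volume fields (FsName, Monitors, SubvolumeGroup, ClusterID) are read off the receiver and every call site drops the volOptions argument. Below is a minimal, self-contained sketch of that refactor; the options type and its fields are hypothetical stand-ins for illustration, not the actual ceph-csi code.

package main

import "fmt"

// options stands in for cephfs' volumeOptions; the fields are illustrative only.
type options struct {
	FsName string
	Size   int64
}

// Before: a free function that receives the options explicitly.
func resizeFree(o *options, quota int64) error {
	fmt.Printf("resize %s to %d\n", o.FsName, quota)
	return nil
}

// After: the same logic as a method; the body reads fields off the receiver
// and callers no longer pass the options value as an argument.
func (o *options) resize(quota int64) error {
	fmt.Printf("resize %s to %d\n", o.FsName, quota)
	return nil
}

func main() {
	o := &options{FsName: "myfs", Size: 1 << 20}
	_ = resizeFree(o, o.Size) // old call shape
	_ = o.resize(o.Size)      // new call shape, mirroring the diff above
}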