cephfs: refactor cephfs core functions

This commit refactors the cephfs core
functions to use interfaces. This gives
the code a cleaner structure and makes
it easier to write unit test cases.

update #852

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Authored by Madhu Rajanna on 2022-02-15 17:41:09 +05:30; committed by mergify[bot]
parent f19ca4a473
commit e9802c4940
12 changed files with 416 additions and 316 deletions
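The diffs below are easier to follow with the new calling convention in mind: package-level helpers and VolumeOptions methods in core are replaced by per-object clients returned from core.NewSubVolume and core.NewSnapshot, while reservation and omap handling moves to the new store package. A minimal sketch of that pattern, using only the constructors and methods introduced in this commit; the wrapper function, its name, and the example package are illustrative.

```go
package example // illustrative wrapper, not part of this commit

import (
	"context"

	"github.com/ceph/ceph-csi/internal/cephfs/core"
	"github.com/ceph/ceph-csi/internal/cephfs/store"
)

// createAndSnapshot shows the new shape of a call site: build a client bound
// to one subvolume (or one snapshot) and invoke methods on it, instead of
// passing VolumeOptions plus a volume ID into free functions.
func createAndSnapshot(ctx context.Context, volOptions *store.VolumeOptions, snapshotID string) error {
	volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
	if err := volClient.CreateVolume(ctx); err != nil {
		return err
	}

	snapClient := core.NewSnapshot(volOptions.GetConnection(), snapshotID, &volOptions.SubVolume)

	return snapClient.CreateSnapshot(ctx)
}
```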


@ -23,6 +23,7 @@ import (
"github.com/ceph/ceph-csi/internal/cephfs/core" "github.com/ceph/ceph-csi/internal/cephfs/core"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors" cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
"github.com/ceph/ceph-csi/internal/cephfs/store"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util" fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
csicommon "github.com/ceph/ceph-csi/internal/csi-common" csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/util" "github.com/ceph/ceph-csi/internal/util"
@ -55,12 +56,12 @@ type ControllerServer struct {
func (cs *ControllerServer) createBackingVolume( func (cs *ControllerServer) createBackingVolume(
ctx context.Context, ctx context.Context,
volOptions, volOptions,
parentVolOpt *core.VolumeOptions, parentVolOpt *store.VolumeOptions,
pvID *store.VolumeIdentifier,
vID, sID *store.SnapshotIdentifier) error {
pvID *core.VolumeIdentifier,
sID *core.SnapshotIdentifier) error {
var err error var err error
volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
if sID != nil { if sID != nil {
if err = cs.OperationLocks.GetRestoreLock(sID.SnapshotID); err != nil { if err = cs.OperationLocks.GetRestoreLock(sID.SnapshotID); err != nil {
log.ErrorLog(ctx, err.Error()) log.ErrorLog(ctx, err.Error())
@ -68,8 +69,12 @@ func (cs *ControllerServer) createBackingVolume(
return status.Error(codes.Aborted, err.Error()) return status.Error(codes.Aborted, err.Error())
} }
defer cs.OperationLocks.ReleaseRestoreLock(sID.SnapshotID) defer cs.OperationLocks.ReleaseRestoreLock(sID.SnapshotID)
snap := core.Snapshot{
SnapshotID: sID.FsSnapshotName,
SubVolume: &parentVolOpt.SubVolume,
}
err = core.CreateCloneFromSnapshot(ctx, parentVolOpt, volOptions, vID, sID) err = volClient.CreateCloneFromSnapshot(ctx, snap)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to create clone from snapshot %s: %v", sID.FsSnapshotName, err) log.ErrorLog(ctx, "failed to create clone from snapshot %s: %v", sID.FsSnapshotName, err)
@ -85,12 +90,8 @@ func (cs *ControllerServer) createBackingVolume(
return status.Error(codes.Aborted, err.Error()) return status.Error(codes.Aborted, err.Error())
} }
defer cs.OperationLocks.ReleaseCloneLock(pvID.VolumeID) defer cs.OperationLocks.ReleaseCloneLock(pvID.VolumeID)
err = core.CreateCloneFromSubvolume( err = volClient.CreateCloneFromSubvolume(
ctx, ctx, &parentVolOpt.SubVolume)
fsutil.VolumeID(pvID.FsSubvolName),
fsutil.VolumeID(vID.FsSubvolName),
volOptions,
parentVolOpt)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to create clone from subvolume %s: %v", fsutil.VolumeID(pvID.FsSubvolName), err) log.ErrorLog(ctx, "failed to create clone from subvolume %s: %v", fsutil.VolumeID(pvID.FsSubvolName), err)
@ -99,8 +100,7 @@ func (cs *ControllerServer) createBackingVolume(
return nil return nil
} }
if err = volClient.CreateVolume(ctx); err != nil {
if err = core.CreateVolume(ctx, volOptions, fsutil.VolumeID(vID.FsSubvolName), volOptions.Size); err != nil {
log.ErrorLog(ctx, "failed to create volume %s: %v", volOptions.RequestName, err) log.ErrorLog(ctx, "failed to create volume %s: %v", volOptions.RequestName, err)
return status.Error(codes.Internal, err.Error()) return status.Error(codes.Internal, err.Error())
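The hunk above now builds a core.Snapshot from the parent subvolume and the backing snapshot name, then lets the clone's SubVolumeClient restore from it. A condensed sketch of that branch with locks and logging stripped out; it assumes the same core/store imports as the earlier sketch, and the helper name is illustrative.

```go
func restoreFromSnapshot(
	ctx context.Context,
	volOptions, parentVolOpt *store.VolumeOptions,
	sID *store.SnapshotIdentifier,
) error {
	// client bound to the clone (destination) subvolume.
	volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)

	// the snapshot to restore from lives on the parent subvolume.
	snap := core.Snapshot{
		SnapshotID: sID.FsSnapshotName,
		SubVolume:  &parentVolOpt.SubVolume,
	}

	return volClient.CreateCloneFromSnapshot(ctx, snap)
}
```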
@ -112,7 +112,7 @@ func (cs *ControllerServer) createBackingVolume(
func checkContentSource( func checkContentSource(
ctx context.Context, ctx context.Context,
req *csi.CreateVolumeRequest, req *csi.CreateVolumeRequest,
cr *util.Credentials) (*core.VolumeOptions, *core.VolumeIdentifier, *core.SnapshotIdentifier, error) { cr *util.Credentials) (*store.VolumeOptions, *store.VolumeIdentifier, *store.SnapshotIdentifier, error) {
if req.VolumeContentSource == nil { if req.VolumeContentSource == nil {
return nil, nil, nil, nil return nil, nil, nil, nil
} }
@ -120,7 +120,7 @@ func checkContentSource(
switch volumeSource.Type.(type) { switch volumeSource.Type.(type) {
case *csi.VolumeContentSource_Snapshot: case *csi.VolumeContentSource_Snapshot:
snapshotID := req.VolumeContentSource.GetSnapshot().GetSnapshotId() snapshotID := req.VolumeContentSource.GetSnapshot().GetSnapshotId()
volOpt, _, sid, err := core.NewSnapshotOptionsFromID(ctx, snapshotID, cr) volOpt, _, sid, err := store.NewSnapshotOptionsFromID(ctx, snapshotID, cr)
if err != nil { if err != nil {
if errors.Is(err, cerrors.ErrSnapNotFound) { if errors.Is(err, cerrors.ErrSnapNotFound) {
return nil, nil, nil, status.Error(codes.NotFound, err.Error()) return nil, nil, nil, status.Error(codes.NotFound, err.Error())
@ -133,7 +133,7 @@ func checkContentSource(
case *csi.VolumeContentSource_Volume: case *csi.VolumeContentSource_Volume:
// Find the volume using the provided VolumeID // Find the volume using the provided VolumeID
volID := req.VolumeContentSource.GetVolume().GetVolumeId() volID := req.VolumeContentSource.GetVolume().GetVolumeId()
parentVol, pvID, err := core.NewVolumeOptionsFromVolID(ctx, volID, nil, req.Secrets) parentVol, pvID, err := store.NewVolumeOptionsFromVolID(ctx, volID, nil, req.Secrets)
if err != nil { if err != nil {
if !errors.Is(err, cerrors.ErrVolumeNotFound) { if !errors.Is(err, cerrors.ErrVolumeNotFound) {
return nil, nil, nil, status.Error(codes.NotFound, err.Error()) return nil, nil, nil, status.Error(codes.NotFound, err.Error())
@ -179,7 +179,7 @@ func (cs *ControllerServer) CreateVolume(
} }
defer cs.VolumeLocks.Release(requestName) defer cs.VolumeLocks.Release(requestName)
volOptions, err := core.NewVolumeOptions(ctx, requestName, req, cr) volOptions, err := store.NewVolumeOptions(ctx, requestName, req, cr)
if err != nil { if err != nil {
log.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err) log.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)
@ -199,7 +199,7 @@ func (cs *ControllerServer) CreateVolume(
defer parentVol.Destroy() defer parentVol.Destroy()
} }
vID, err := core.CheckVolExists(ctx, volOptions, parentVol, pvID, sID, cr) vID, err := store.CheckVolExists(ctx, volOptions, parentVol, pvID, sID, cr)
if err != nil { if err != nil {
if cerrors.IsCloneRetryError(err) { if cerrors.IsCloneRetryError(err) {
return nil, status.Error(codes.Aborted, err.Error()) return nil, status.Error(codes.Aborted, err.Error())
@ -211,9 +211,10 @@ func (cs *ControllerServer) CreateVolume(
if vID != nil { if vID != nil {
if sID != nil || pvID != nil { if sID != nil || pvID != nil {
err = volOptions.ExpandVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), volOptions.Size) volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
err = volClient.ExpandVolume(ctx, volOptions.Size)
if err != nil { if err != nil {
purgeErr := volOptions.PurgeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), false) purgeErr := volClient.PurgeVolume(ctx, false)
if purgeErr != nil { if purgeErr != nil {
log.ErrorLog(ctx, "failed to delete volume %s: %v", requestName, purgeErr) log.ErrorLog(ctx, "failed to delete volume %s: %v", requestName, purgeErr)
// All errors other than ErrVolumeNotFound should return an error back to the caller // All errors other than ErrVolumeNotFound should return an error back to the caller
@ -221,7 +222,7 @@ func (cs *ControllerServer) CreateVolume(
return nil, status.Error(codes.Internal, purgeErr.Error()) return nil, status.Error(codes.Internal, purgeErr.Error())
} }
} }
errUndo := core.UndoVolReservation(ctx, volOptions, *vID, secret) errUndo := store.UndoVolReservation(ctx, volOptions, *vID, secret)
if errUndo != nil { if errUndo != nil {
log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)", log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)",
requestName, errUndo) requestName, errUndo)
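When CheckVolExists returns an existing volume, the controller resizes it through the same client and, if that fails, rolls back with PurgeVolume followed by store.UndoVolReservation. A hedged sketch of that rollback; the secrets parameter is assumed to be the usual CSI map[string]string, and the helper name is illustrative.

```go
func ensureSize(
	ctx context.Context,
	volOptions *store.VolumeOptions,
	vID *store.VolumeIdentifier,
	secret map[string]string,
) error {
	volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)

	err := volClient.ExpandVolume(ctx, volOptions.Size)
	if err == nil {
		return nil
	}

	// best-effort rollback: drop the subvolume, then the omap reservation.
	if purgeErr := volClient.PurgeVolume(ctx, false); purgeErr != nil {
		return purgeErr
	}

	return store.UndoVolReservation(ctx, volOptions, *vID, secret)
}
```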
@ -254,7 +255,7 @@ func (cs *ControllerServer) CreateVolume(
} }
// Reservation // Reservation
vID, err = core.ReserveVol(ctx, volOptions, secret) vID, err = store.ReserveVol(ctx, volOptions, secret)
if err != nil { if err != nil {
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
@ -262,7 +263,7 @@ func (cs *ControllerServer) CreateVolume(
defer func() { defer func() {
if err != nil { if err != nil {
if !cerrors.IsCloneRetryError(err) { if !cerrors.IsCloneRetryError(err) {
errDefer := core.UndoVolReservation(ctx, volOptions, *vID, secret) errDefer := store.UndoVolReservation(ctx, volOptions, *vID, secret)
if errDefer != nil { if errDefer != nil {
log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)", log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)",
requestName, errDefer) requestName, errDefer)
@ -272,7 +273,7 @@ func (cs *ControllerServer) CreateVolume(
}() }()
// Create a volume // Create a volume
err = cs.createBackingVolume(ctx, volOptions, parentVol, vID, pvID, sID) err = cs.createBackingVolume(ctx, volOptions, parentVol, pvID, sID)
if err != nil { if err != nil {
if cerrors.IsCloneRetryError(err) { if cerrors.IsCloneRetryError(err) {
return nil, status.Error(codes.Aborted, err.Error()) return nil, status.Error(codes.Aborted, err.Error())
@ -281,9 +282,10 @@ func (cs *ControllerServer) CreateVolume(
return nil, err return nil, err
} }
volOptions.RootPath, err = volOptions.GetVolumeRootPathCeph(ctx, fsutil.VolumeID(vID.FsSubvolName)) volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
volOptions.RootPath, err = volClient.GetVolumeRootPathCeph(ctx)
if err != nil { if err != nil {
purgeErr := volOptions.PurgeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), true) purgeErr := volClient.PurgeVolume(ctx, true)
if purgeErr != nil { if purgeErr != nil {
log.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, purgeErr) log.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, purgeErr)
// All errors other than ErrVolumeNotFound should return an error back to the caller // All errors other than ErrVolumeNotFound should return an error back to the caller
@ -355,7 +357,7 @@ func (cs *ControllerServer) DeleteVolume(
defer cs.OperationLocks.ReleaseDeleteLock(req.GetVolumeId()) defer cs.OperationLocks.ReleaseDeleteLock(req.GetVolumeId())
// Find the volume using the provided VolumeID // Find the volume using the provided VolumeID
volOptions, vID, err := core.NewVolumeOptionsFromVolID(ctx, string(volID), nil, secrets) volOptions, vID, err := store.NewVolumeOptionsFromVolID(ctx, string(volID), nil, secrets)
if err != nil { if err != nil {
// if error is ErrPoolNotFound, the pool is already deleted we dont // if error is ErrPoolNotFound, the pool is already deleted we dont
// need to worry about deleting subvolume or omap data, return success // need to worry about deleting subvolume or omap data, return success
@ -386,7 +388,7 @@ func (cs *ControllerServer) DeleteVolume(
} }
defer cs.VolumeLocks.Release(volOptions.RequestName) defer cs.VolumeLocks.Release(volOptions.RequestName)
if err = core.UndoVolReservation(ctx, volOptions, *vID, secrets); err != nil { if err = store.UndoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
@ -410,7 +412,8 @@ func (cs *ControllerServer) DeleteVolume(
} }
defer cr.DeleteCredentials() defer cr.DeleteCredentials()
if err = volOptions.PurgeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), false); err != nil { volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
if err = volClient.PurgeVolume(ctx, false); err != nil {
log.ErrorLog(ctx, "failed to delete volume %s: %v", volID, err) log.ErrorLog(ctx, "failed to delete volume %s: %v", volID, err)
if errors.Is(err, cerrors.ErrVolumeHasSnapshots) { if errors.Is(err, cerrors.ErrVolumeHasSnapshots) {
return nil, status.Error(codes.FailedPrecondition, err.Error()) return nil, status.Error(codes.FailedPrecondition, err.Error())
@ -421,7 +424,7 @@ func (cs *ControllerServer) DeleteVolume(
} }
} }
if err := core.UndoVolReservation(ctx, volOptions, *vID, secrets); err != nil { if err := store.UndoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
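DeleteVolume follows the same pattern: purge through the client, map cerrors.ErrVolumeHasSnapshots to FailedPrecondition, and only then drop the reservation. A minimal sketch of that flow; the gRPC wrapping mirrors the hunk above, and the helper name is illustrative.

```go
package example // illustrative, not part of this commit

import (
	"context"
	"errors"

	"github.com/ceph/ceph-csi/internal/cephfs/core"
	cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
	"github.com/ceph/ceph-csi/internal/cephfs/store"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func deleteSubvolume(
	ctx context.Context,
	volOptions *store.VolumeOptions,
	vID *store.VolumeIdentifier,
	secrets map[string]string,
) error {
	volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)

	if err := volClient.PurgeVolume(ctx, false); err != nil {
		if errors.Is(err, cerrors.ErrVolumeHasSnapshots) {
			// deletion must wait until all snapshots of the subvolume are gone.
			return status.Error(codes.FailedPrecondition, err.Error())
		}

		return status.Error(codes.Internal, err.Error())
	}

	return store.UndoVolReservation(ctx, volOptions, *vID, secrets)
}
```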
@ -484,7 +487,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
} }
defer cr.DeleteCredentials() defer cr.DeleteCredentials()
volOptions, volIdentifier, err := core.NewVolumeOptionsFromVolID(ctx, volID, nil, secret) volOptions, volIdentifier, err := store.NewVolumeOptionsFromVolID(ctx, volID, nil, secret)
if err != nil { if err != nil {
log.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err) log.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)
@ -493,8 +496,8 @@ func (cs *ControllerServer) ControllerExpandVolume(
defer volOptions.Destroy() defer volOptions.Destroy()
RoundOffSize := util.RoundOffBytes(req.GetCapacityRange().GetRequiredBytes()) RoundOffSize := util.RoundOffBytes(req.GetCapacityRange().GetRequiredBytes())
volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
if err = volOptions.ResizeVolume(ctx, fsutil.VolumeID(volIdentifier.FsSubvolName), RoundOffSize); err != nil { if err = volClient.ResizeVolume(ctx, RoundOffSize); err != nil {
log.ErrorLog(ctx, "failed to expand volume %s: %v", fsutil.VolumeID(volIdentifier.FsSubvolName), err) log.ErrorLog(ctx, "failed to expand volume %s: %v", fsutil.VolumeID(volIdentifier.FsSubvolName), err)
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
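ControllerExpandVolume now rounds the requested size and resizes through the client. A small sketch of just that step, assuming the internal util package's RoundOffBytes shown in the hunk above; names are illustrative.

```go
func expandSubvolume(ctx context.Context, volOptions *store.VolumeOptions, requiredBytes int64) (int64, error) {
	roundOffSize := util.RoundOffBytes(requiredBytes)

	volClient := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)
	if err := volClient.ResizeVolume(ctx, roundOffSize); err != nil {
		return 0, err
	}

	return roundOffSize, nil
}
```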
@ -521,7 +524,7 @@ func (cs *ControllerServer) CreateSnapshot(
} }
defer cr.DeleteCredentials() defer cr.DeleteCredentials()
clusterData, err := core.GetClusterInformation(req.GetParameters()) clusterData, err := store.GetClusterInformation(req.GetParameters())
if err != nil { if err != nil {
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
@ -545,7 +548,7 @@ func (cs *ControllerServer) CreateSnapshot(
defer cs.OperationLocks.ReleaseSnapshotCreateLock(sourceVolID) defer cs.OperationLocks.ReleaseSnapshotCreateLock(sourceVolID)
// Find the volume using the provided VolumeID // Find the volume using the provided VolumeID
parentVolOptions, vid, err := core.NewVolumeOptionsFromVolID(ctx, sourceVolID, nil, req.GetSecrets()) parentVolOptions, vid, err := store.NewVolumeOptionsFromVolID(ctx, sourceVolID, nil, req.GetSecrets())
if err != nil { if err != nil {
if errors.Is(err, util.ErrPoolNotFound) { if errors.Is(err, util.ErrPoolNotFound) {
log.WarningLog(ctx, "failed to get backend volume for %s: %v", sourceVolID, err) log.WarningLog(ctx, "failed to get backend volume for %s: %v", sourceVolID, err)
@ -569,7 +572,7 @@ func (cs *ControllerServer) CreateSnapshot(
parentVolOptions.ClusterID) parentVolOptions.ClusterID)
} }
cephfsSnap, genSnapErr := core.GenSnapFromOptions(ctx, req) cephfsSnap, genSnapErr := store.GenSnapFromOptions(ctx, req)
if genSnapErr != nil { if genSnapErr != nil {
return nil, status.Error(codes.Internal, genSnapErr.Error()) return nil, status.Error(codes.Internal, genSnapErr.Error())
} }
@ -582,7 +585,7 @@ func (cs *ControllerServer) CreateSnapshot(
} }
defer cs.VolumeLocks.Release(sourceVolID) defer cs.VolumeLocks.Release(sourceVolID)
snapName := req.GetName() snapName := req.GetName()
sid, snapInfo, err := core.CheckSnapExists(ctx, parentVolOptions, vid.FsSubvolName, cephfsSnap, cr) sid, snapInfo, err := store.CheckSnapExists(ctx, parentVolOptions, cephfsSnap, cr)
if err != nil { if err != nil {
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
@ -591,8 +594,12 @@ func (cs *ControllerServer) CreateSnapshot(
// ceph fs subvolume info command got added in 14.2.10 and 15.+ // ceph fs subvolume info command got added in 14.2.10 and 15.+
// as we are not able to retrieve the parent size we are rejecting the // as we are not able to retrieve the parent size we are rejecting the
// request to create snapshot. // request to create snapshot.
// TODO: For this purpose we could make use of cached clusterAdditionalInfo too. // TODO: For this purpose we could make use of cached clusterAdditionalInfo
info, err := parentVolOptions.GetSubVolumeInfo(ctx, fsutil.VolumeID(vid.FsSubvolName)) // too.
volClient := core.NewSubVolume(parentVolOptions.GetConnection(),
&parentVolOptions.SubVolume,
parentVolOptions.ClusterID)
info, err := volClient.GetSubVolumeInfo(ctx)
if err != nil { if err != nil {
// Check error code value against ErrInvalidCommand to understand the cluster // Check error code value against ErrInvalidCommand to understand the cluster
// support it or not, It's safe to evaluate as the filtering // support it or not, It's safe to evaluate as the filtering
@ -603,7 +610,7 @@ func (cs *ControllerServer) CreateSnapshot(
"subvolume info command not supported in current ceph cluster") "subvolume info command not supported in current ceph cluster")
} }
if sid != nil { if sid != nil {
errDefer := core.UndoSnapReservation(ctx, parentVolOptions, *sid, snapName, cr) errDefer := store.UndoSnapReservation(ctx, parentVolOptions, *sid, snapName, cr)
if errDefer != nil { if errDefer != nil {
log.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)", log.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)",
requestName, errDefer) requestName, errDefer)
@ -617,7 +624,8 @@ func (cs *ControllerServer) CreateSnapshot(
// check snapshot is protected // check snapshot is protected
protected := true protected := true
if !(snapInfo.Protected == core.SnapshotIsProtected) { if !(snapInfo.Protected == core.SnapshotIsProtected) {
err = parentVolOptions.ProtectSnapshot(ctx, fsutil.VolumeID(sid.FsSnapshotName), fsutil.VolumeID(vid.FsSubvolName)) snapClient := core.NewSnapshot(parentVolOptions.GetConnection(), sid.FsSnapshotName, &parentVolOptions.SubVolume)
err = snapClient.ProtectSnapshot(ctx)
if err != nil { if err != nil {
protected = false protected = false
log.WarningLog(ctx, "failed to protect snapshot of snapshot: %s (%s)", log.WarningLog(ctx, "failed to protect snapshot of snapshot: %s (%s)",
@ -637,20 +645,20 @@ func (cs *ControllerServer) CreateSnapshot(
} }
// Reservation // Reservation
sID, err := core.ReserveSnap(ctx, parentVolOptions, vid.FsSubvolName, cephfsSnap, cr) sID, err := store.ReserveSnap(ctx, parentVolOptions, vid.FsSubvolName, cephfsSnap, cr)
if err != nil { if err != nil {
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
defer func() { defer func() {
if err != nil { if err != nil {
errDefer := core.UndoSnapReservation(ctx, parentVolOptions, *sID, snapName, cr) errDefer := store.UndoSnapReservation(ctx, parentVolOptions, *sID, snapName, cr)
if errDefer != nil { if errDefer != nil {
log.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)", log.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)",
requestName, errDefer) requestName, errDefer)
} }
} }
}() }()
snap, err := doSnapshot(ctx, parentVolOptions, vid.FsSubvolName, sID.FsSnapshotName) snap, err := doSnapshot(ctx, parentVolOptions, sID.FsSnapshotName)
if err != nil { if err != nil {
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
@ -668,13 +676,12 @@ func (cs *ControllerServer) CreateSnapshot(
func doSnapshot( func doSnapshot(
ctx context.Context, ctx context.Context,
volOpt *core.VolumeOptions, volOpt *store.VolumeOptions,
subvolumeName,
snapshotName string) (core.SnapshotInfo, error) { snapshotName string) (core.SnapshotInfo, error) {
volID := fsutil.VolumeID(subvolumeName)
snapID := fsutil.VolumeID(snapshotName) snapID := fsutil.VolumeID(snapshotName)
snap := core.SnapshotInfo{} snap := core.SnapshotInfo{}
err := volOpt.CreateSnapshot(ctx, snapID, volID) snapClient := core.NewSnapshot(volOpt.GetConnection(), snapshotName, &volOpt.SubVolume)
err := snapClient.CreateSnapshot(ctx)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to create snapshot %s %v", snapID, err) log.ErrorLog(ctx, "failed to create snapshot %s %v", snapID, err)
@ -682,13 +689,13 @@ func doSnapshot(
} }
defer func() { defer func() {
if err != nil { if err != nil {
dErr := volOpt.DeleteSnapshot(ctx, snapID, volID) dErr := snapClient.DeleteSnapshot(ctx)
if dErr != nil { if dErr != nil {
log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapID, err) log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapID, err)
} }
} }
}() }()
snap, err = volOpt.GetSnapshotInfo(ctx, snapID, volID) snap, err = snapClient.GetSnapshotInfo(ctx)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to get snapshot info %s %v", snapID, err) log.ErrorLog(ctx, "failed to get snapshot info %s %v", snapID, err)
@ -700,7 +707,7 @@ func doSnapshot(
return snap, err return snap, err
} }
snap.CreationTime = t snap.CreationTime = t
err = volOpt.ProtectSnapshot(ctx, snapID, volID) err = snapClient.ProtectSnapshot(ctx)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to protect snapshot %s %v", snapID, err) log.ErrorLog(ctx, "failed to protect snapshot %s %v", snapID, err)
} }
@ -764,7 +771,7 @@ func (cs *ControllerServer) DeleteSnapshot(
} }
defer cs.OperationLocks.ReleaseDeleteLock(snapshotID) defer cs.OperationLocks.ReleaseDeleteLock(snapshotID)
volOpt, snapInfo, sid, err := core.NewSnapshotOptionsFromID(ctx, snapshotID, cr) volOpt, snapInfo, sid, err := store.NewSnapshotOptionsFromID(ctx, snapshotID, cr)
if err != nil { if err != nil {
switch { switch {
case errors.Is(err, util.ErrPoolNotFound): case errors.Is(err, util.ErrPoolNotFound):
@ -779,10 +786,10 @@ func (cs *ControllerServer) DeleteSnapshot(
// success as deletion is complete // success as deletion is complete
return &csi.DeleteSnapshotResponse{}, nil return &csi.DeleteSnapshotResponse{}, nil
case errors.Is(err, cerrors.ErrSnapNotFound): case errors.Is(err, cerrors.ErrSnapNotFound):
err = core.UndoSnapReservation(ctx, volOpt, *sid, sid.RequestName, cr) err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)", log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
sid.RequestName, sid.FsSnapshotName, err) sid.FsSubvolName, sid.FsSnapshotName, err)
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
@ -792,10 +799,10 @@ func (cs *ControllerServer) DeleteSnapshot(
// if the error is ErrVolumeNotFound, the subvolume is already deleted // if the error is ErrVolumeNotFound, the subvolume is already deleted
// from backend, Hence undo the omap entries and return success // from backend, Hence undo the omap entries and return success
log.ErrorLog(ctx, "Volume not present") log.ErrorLog(ctx, "Volume not present")
err = core.UndoSnapReservation(ctx, volOpt, *sid, sid.RequestName, cr) err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)", log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
sid.RequestName, sid.FsSnapshotName, err) sid.FsSubvolName, sid.FsSnapshotName, err)
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
@ -819,17 +826,18 @@ func (cs *ControllerServer) DeleteSnapshot(
if snapInfo.HasPendingClones == "yes" { if snapInfo.HasPendingClones == "yes" {
return nil, status.Errorf(codes.FailedPrecondition, "snapshot %s has pending clones", snapshotID) return nil, status.Errorf(codes.FailedPrecondition, "snapshot %s has pending clones", snapshotID)
} }
snapClient := core.NewSnapshot(volOpt.GetConnection(), sid.FsSnapshotName, &volOpt.SubVolume)
if snapInfo.Protected == core.SnapshotIsProtected { if snapInfo.Protected == core.SnapshotIsProtected {
err = volOpt.UnprotectSnapshot(ctx, fsutil.VolumeID(sid.FsSnapshotName), fsutil.VolumeID(sid.FsSubvolName)) err = snapClient.UnprotectSnapshot(ctx)
if err != nil { if err != nil {
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
} }
err = volOpt.DeleteSnapshot(ctx, fsutil.VolumeID(sid.FsSnapshotName), fsutil.VolumeID(sid.FsSubvolName)) err = snapClient.DeleteSnapshot(ctx)
if err != nil { if err != nil {
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
err = core.UndoSnapReservation(ctx, volOpt, *sid, sid.RequestName, cr) err = store.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)", log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
sid.RequestName, sid.FsSnapshotName, err) sid.RequestName, sid.FsSnapshotName, err)
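The delete path unprotects only when the snapshot is still protected, deletes the backend snapshot, and removes the omap reservation last. A condensed sketch of that ordering; it mirrors the hunk above (including the switch to sid.FsSnapshotName for the reservation lookup), and the helper name is illustrative.

```go
func deleteSnapshotAndReservation(
	ctx context.Context,
	volOpt *store.VolumeOptions,
	sid *store.SnapshotIdentifier,
	snapInfo core.SnapshotInfo,
	cr *util.Credentials,
) error {
	snapClient := core.NewSnapshot(volOpt.GetConnection(), sid.FsSnapshotName, &volOpt.SubVolume)

	if snapInfo.Protected == core.SnapshotIsProtected {
		if err := snapClient.UnprotectSnapshot(ctx); err != nil {
			return err
		}
	}

	if err := snapClient.DeleteSnapshot(ctx); err != nil {
		return err
	}

	// drop the omap reservation only after the backend snapshot is gone.
	return store.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
}
```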


@ -21,7 +21,6 @@ import (
"errors" "errors"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors" cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
"github.com/ceph/ceph-csi/internal/util/log" "github.com/ceph/ceph-csi/internal/util/log"
) )
@ -29,16 +28,16 @@ import (
type cephFSCloneState string type cephFSCloneState string
const ( const (
// cephFSCloneError indicates that fetching the clone state returned an error. // CephFSCloneError indicates that fetching the clone state returned an error.
cephFSCloneError = cephFSCloneState("") CephFSCloneError = cephFSCloneState("")
// cephFSCloneFailed indicates that clone is in failed state. // CephFSCloneFailed indicates that clone is in failed state.
cephFSCloneFailed = cephFSCloneState("failed") CephFSCloneFailed = cephFSCloneState("failed")
// cephFSClonePending indicates that clone is in pending state. // CephFSClonePending indicates that clone is in pending state.
cephFSClonePending = cephFSCloneState("pending") CephFSClonePending = cephFSCloneState("pending")
// cephFSCloneInprogress indicates that clone is in in-progress state. // CephFSCloneInprogress indicates that clone is in in-progress state.
cephFSCloneInprogress = cephFSCloneState("in-progress") CephFSCloneInprogress = cephFSCloneState("in-progress")
// cephFSCloneComplete indicates that clone is in complete state. // CephFSCloneComplete indicates that clone is in complete state.
cephFSCloneComplete = cephFSCloneState("complete") CephFSCloneComplete = cephFSCloneState("complete")
// SnapshotIsProtected string indicates that the snapshot is currently protected. // SnapshotIsProtected string indicates that the snapshot is currently protected.
SnapshotIsProtected = "yes" SnapshotIsProtected = "yes"
@ -47,28 +46,28 @@ const (
// toError checks the state of the clone if it's not cephFSCloneComplete. // toError checks the state of the clone if it's not cephFSCloneComplete.
func (cs cephFSCloneState) toError() error { func (cs cephFSCloneState) toError() error {
switch cs { switch cs {
case cephFSCloneComplete: case CephFSCloneComplete:
return nil return nil
case cephFSCloneError: case CephFSCloneError:
return cerrors.ErrInvalidClone return cerrors.ErrInvalidClone
case cephFSCloneInprogress: case CephFSCloneInprogress:
return cerrors.ErrCloneInProgress return cerrors.ErrCloneInProgress
case cephFSClonePending: case CephFSClonePending:
return cerrors.ErrClonePending return cerrors.ErrClonePending
case cephFSCloneFailed: case CephFSCloneFailed:
return cerrors.ErrCloneFailed return cerrors.ErrCloneFailed
} }
return nil return nil
} }
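The clone-state constants become exported here so that code outside this file (and unit tests) can compare against them, while toError itself stays unexported. An illustrative caller-side helper, not part of this commit, that treats pending and in-progress clones as retryable:

```go
func cloneFinished(ctx context.Context, volClient core.SubVolumeClient) (bool, error) {
	state, err := volClient.GetCloneState(ctx)
	if err != nil {
		return false, err
	}

	switch state {
	case core.CephFSCloneComplete:
		return true, nil
	case core.CephFSClonePending, core.CephFSCloneInprogress:
		return false, nil // not done yet, the caller retries later
	case core.CephFSCloneFailed:
		return false, cerrors.ErrCloneFailed
	default: // CephFSCloneError or an unknown state
		return false, cerrors.ErrInvalidClone
	}
}
```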
func CreateCloneFromSubvolume( // CreateCloneFromSubvolume creates a clone from a subvolume.
func (s *subVolumeClient) CreateCloneFromSubvolume(
ctx context.Context, ctx context.Context,
volID, cloneID fsutil.VolumeID, parentvolOpt *SubVolume) error {
volOpt, snapshotID := s.VolID
parentvolOpt *VolumeOptions) error { snapClient := NewSnapshot(s.conn, snapshotID, parentvolOpt)
snapshotID := cloneID err := snapClient.CreateSnapshot(ctx)
err := parentvolOpt.CreateSnapshot(ctx, snapshotID, volID)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to create snapshot %s %v", snapshotID, err) log.ErrorLog(ctx, "failed to create snapshot %s %v", snapshotID, err)
@ -82,17 +81,17 @@ func CreateCloneFromSubvolume(
) )
defer func() { defer func() {
if protectErr != nil { if protectErr != nil {
err = parentvolOpt.DeleteSnapshot(ctx, snapshotID, volID) err = snapClient.DeleteSnapshot(ctx)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err) log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
} }
} }
if cloneErr != nil { if cloneErr != nil {
if err = volOpt.PurgeVolume(ctx, cloneID, true); err != nil { if err = s.PurgeVolume(ctx, true); err != nil {
log.ErrorLog(ctx, "failed to delete volume %s: %v", cloneID, err) log.ErrorLog(ctx, "failed to delete volume %s: %v", s.VolID, err)
} }
if err = parentvolOpt.UnprotectSnapshot(ctx, snapshotID, volID); err != nil { if err = snapClient.UnprotectSnapshot(ctx); err != nil {
// In case the snap is already unprotected we get ErrSnapProtectionExist error code // In case the snap is already unprotected we get ErrSnapProtectionExist error code
// in that case we are safe and we could discard this error and we are good to go // in that case we are safe and we could discard this error and we are good to go
// ahead with deletion // ahead with deletion
@ -100,47 +99,46 @@ func CreateCloneFromSubvolume(
log.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapshotID, err) log.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapshotID, err)
} }
} }
if err = parentvolOpt.DeleteSnapshot(ctx, snapshotID, volID); err != nil { if err = snapClient.DeleteSnapshot(ctx); err != nil {
log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err) log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
} }
} }
}() }()
protectErr = parentvolOpt.ProtectSnapshot(ctx, snapshotID, volID) protectErr = snapClient.ProtectSnapshot(ctx)
if protectErr != nil { if protectErr != nil {
log.ErrorLog(ctx, "failed to protect snapshot %s %v", snapshotID, protectErr) log.ErrorLog(ctx, "failed to protect snapshot %s %v", snapshotID, protectErr)
return protectErr return protectErr
} }
cloneErr = snapClient.CloneSnapshot(ctx, s.SubVolume)
cloneErr = parentvolOpt.cloneSnapshot(ctx, volID, snapshotID, cloneID, volOpt)
if cloneErr != nil { if cloneErr != nil {
log.ErrorLog(ctx, "failed to clone snapshot %s %s to %s %v", volID, snapshotID, cloneID, cloneErr) log.ErrorLog(ctx, "failed to clone snapshot %s %s to %s %v", parentvolOpt.VolID, snapshotID, s.VolID, cloneErr)
return cloneErr return cloneErr
} }
cloneState, cloneErr := volOpt.getCloneState(ctx, cloneID) cloneState, cloneErr := s.GetCloneState(ctx)
if cloneErr != nil { if cloneErr != nil {
log.ErrorLog(ctx, "failed to get clone state: %v", cloneErr) log.ErrorLog(ctx, "failed to get clone state: %v", cloneErr)
return cloneErr return cloneErr
} }
if cloneState != cephFSCloneComplete { if cloneState != CephFSCloneComplete {
log.ErrorLog(ctx, "clone %s did not complete: %v", cloneID, cloneState.toError()) log.ErrorLog(ctx, "clone %s did not complete: %v", s.VolID, cloneState.toError())
return cloneState.toError() return cloneState.toError()
} }
err = volOpt.ExpandVolume(ctx, cloneID, volOpt.Size) err = s.ExpandVolume(ctx, s.Size)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to expand volume %s: %v", cloneID, err) log.ErrorLog(ctx, "failed to expand volume %s: %v", s.VolID, err)
return err return err
} }
// As we completed clone, remove the intermediate snap // As we completed clone, remove the intermediate snap
if err = parentvolOpt.UnprotectSnapshot(ctx, snapshotID, volID); err != nil { if err = snapClient.UnprotectSnapshot(ctx); err != nil {
// In case the snap is already unprotected we get ErrSnapProtectionExist error code // In case the snap is already unprotected we get ErrSnapProtectionExist error code
// in that case we are safe and we could discard this error and we are good to go // in that case we are safe and we could discard this error and we are good to go
// ahead with deletion // ahead with deletion
@ -150,7 +148,7 @@ func CreateCloneFromSubvolume(
return err return err
} }
} }
if err = parentvolOpt.DeleteSnapshot(ctx, snapshotID, volID); err != nil { if err = snapClient.DeleteSnapshot(ctx); err != nil {
log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err) log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
return err return err
@ -159,14 +157,14 @@ func CreateCloneFromSubvolume(
return nil return nil
} }
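PVC-PVC cloning is now driven from the clone's client and only needs the parent SubVolume; the intermediate snapshot (named after the clone's VolID), its protection, the clone itself, the resize, and the final snapshot cleanup all happen inside CreateCloneFromSubvolume. A minimal caller-side sketch, names illustrative:

```go
func clonePVC(ctx context.Context, cloneVolOptions, parentVolOptions *store.VolumeOptions) error {
	volClient := core.NewSubVolume(cloneVolOptions.GetConnection(),
		&cloneVolOptions.SubVolume, cloneVolOptions.ClusterID)

	// snapshot of the parent, clone into this subvolume, resize and cleanup
	// are all handled inside CreateCloneFromSubvolume.
	return volClient.CreateCloneFromSubvolume(ctx, &parentVolOptions.SubVolume)
}
```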
func cleanupCloneFromSubvolumeSnapshot( // CleanupSnapshotFromSubvolume removes the snapshot from the subvolume.
ctx context.Context, func (s *subVolumeClient) CleanupSnapshotFromSubvolume(
volID, cloneID fsutil.VolumeID, ctx context.Context, parentVol *SubVolume) error {
parentVolOpt *VolumeOptions) error {
// snapshot name is same as clone name as we need a name which can be // snapshot name is same as clone name as we need a name which can be
// identified during PVC-PVC cloning. // identified during PVC-PVC cloning.
snapShotID := cloneID snapShotID := s.VolID
snapInfo, err := parentVolOpt.GetSnapshotInfo(ctx, snapShotID, volID) snapClient := NewSnapshot(s.conn, snapShotID, parentVol)
snapInfo, err := snapClient.GetSnapshotInfo(ctx)
if err != nil { if err != nil {
if errors.Is(err, cerrors.ErrSnapNotFound) { if errors.Is(err, cerrors.ErrSnapNotFound) {
return nil return nil
@ -176,14 +174,14 @@ func cleanupCloneFromSubvolumeSnapshot(
} }
if snapInfo.Protected == SnapshotIsProtected { if snapInfo.Protected == SnapshotIsProtected {
err = parentVolOpt.UnprotectSnapshot(ctx, snapShotID, volID) err = snapClient.UnprotectSnapshot(ctx)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapShotID, err) log.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapShotID, err)
return err return err
} }
} }
err = parentVolOpt.DeleteSnapshot(ctx, snapShotID, volID) err = snapClient.DeleteSnapshot(ctx)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapShotID, err) log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapShotID, err)
@ -193,45 +191,39 @@ func cleanupCloneFromSubvolumeSnapshot(
return nil return nil
} }
func CreateCloneFromSnapshot( // CreateSnapshotFromSubvolume creates a clone from subvolume snapshot.
ctx context.Context, func (s *subVolumeClient) CreateCloneFromSnapshot(
parentVolOpt, volOptions *VolumeOptions, ctx context.Context, snap Snapshot) error {
vID *VolumeIdentifier, snapID := snap.SnapshotID
sID *SnapshotIdentifier) error { snapClient := NewSnapshot(s.conn, snapID, snap.SubVolume)
snapID := fsutil.VolumeID(sID.FsSnapshotName) err := snapClient.CloneSnapshot(ctx, s.SubVolume)
err := parentVolOpt.cloneSnapshot(
ctx,
fsutil.VolumeID(sID.FsSubvolName),
snapID,
fsutil.VolumeID(vID.FsSubvolName),
volOptions)
if err != nil { if err != nil {
return err return err
} }
defer func() { defer func() {
if err != nil { if err != nil {
if !cerrors.IsCloneRetryError(err) { if !cerrors.IsCloneRetryError(err) {
if dErr := volOptions.PurgeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), true); dErr != nil { if dErr := s.PurgeVolume(ctx, true); dErr != nil {
log.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, dErr) log.ErrorLog(ctx, "failed to delete volume %s: %v", s.VolID, dErr)
} }
} }
} }
}() }()
cloneState, err := volOptions.getCloneState(ctx, fsutil.VolumeID(vID.FsSubvolName)) cloneState, err := s.GetCloneState(ctx)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to get clone state: %v", err) log.ErrorLog(ctx, "failed to get clone state: %v", err)
return err return err
} }
if cloneState != cephFSCloneComplete { if cloneState != CephFSCloneComplete {
return cloneState.toError() return cloneState.toError()
} }
err = volOptions.ExpandVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), volOptions.Size) err = s.ExpandVolume(ctx, s.Size)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to expand volume %s with error: %v", vID.FsSubvolName, err) log.ErrorLog(ctx, "failed to expand volume %s with error: %v", s.VolID, err)
return err return err
} }
@ -239,24 +231,25 @@ func CreateCloneFromSnapshot(
return nil return nil
} }
func (vo *VolumeOptions) getCloneState(ctx context.Context, volID fsutil.VolumeID) (cephFSCloneState, error) { // GetCloneState returns the clone state of the subvolume.
fsa, err := vo.conn.GetFSAdmin() func (s *subVolumeClient) GetCloneState(ctx context.Context) (cephFSCloneState, error) {
fsa, err := s.conn.GetFSAdmin()
if err != nil { if err != nil {
log.ErrorLog( log.ErrorLog(
ctx, ctx,
"could not get FSAdmin, can get clone status for volume %s with ID %s: %v", "could not get FSAdmin, can get clone status for volume %s with ID %s: %v",
vo.FsName, s.FsName,
string(volID), s.VolID,
err) err)
return cephFSCloneError, err return CephFSCloneError, err
} }
cs, err := fsa.CloneStatus(vo.FsName, vo.SubvolumeGroup, string(volID)) cs, err := fsa.CloneStatus(s.FsName, s.SubvolumeGroup, s.VolID)
if err != nil { if err != nil {
log.ErrorLog(ctx, "could not get clone state for volume %s with ID %s: %v", vo.FsName, string(volID), err) log.ErrorLog(ctx, "could not get clone state for volume %s with ID %s: %v", s.FsName, s.VolID, err)
return cephFSCloneError, err return CephFSCloneError, err
} }
return cephFSCloneState(cs.State), nil return cephFSCloneState(cs.State), nil


@ -27,11 +27,11 @@ import (
func TestCloneStateToError(t *testing.T) { func TestCloneStateToError(t *testing.T) {
t.Parallel() t.Parallel()
errorState := make(map[cephFSCloneState]error) errorState := make(map[cephFSCloneState]error)
errorState[cephFSCloneComplete] = nil errorState[CephFSCloneComplete] = nil
errorState[cephFSCloneError] = cerrors.ErrInvalidClone errorState[CephFSCloneError] = cerrors.ErrInvalidClone
errorState[cephFSCloneInprogress] = cerrors.ErrCloneInProgress errorState[CephFSCloneInprogress] = cerrors.ErrCloneInProgress
errorState[cephFSClonePending] = cerrors.ErrClonePending errorState[CephFSClonePending] = cerrors.ErrClonePending
errorState[cephFSCloneFailed] = cerrors.ErrCloneFailed errorState[CephFSCloneFailed] = cerrors.ErrCloneFailed
for state, err := range errorState { for state, err := range errorState {
assert.Equal(t, state.toError(), err) assert.Equal(t, state.toError(), err)


@ -22,7 +22,7 @@ import (
"time" "time"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors" cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util" "github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log" "github.com/ceph/ceph-csi/internal/util/log"
"github.com/ceph/go-ceph/cephfs/admin" "github.com/ceph/go-ceph/cephfs/admin"
@ -36,32 +36,59 @@ const (
autoProtect = "snapshot-autoprotect" autoProtect = "snapshot-autoprotect"
) )
// CephfsSnapshot represents a CSI snapshot and its cluster information. // SnapshotClient is the interface that holds the signature of snapshot methods
type CephfsSnapshot struct { // that interacts with CephFS snapshot API's.
NamePrefix string type SnapshotClient interface {
Monitors string // CreateSnapshot creates a snapshot of the subvolume.
// MetadataPool & Pool fields are not used atm. But its definitely good to have it in this struct CreateSnapshot(ctx context.Context) error
// so keeping it here // DeleteSnapshot deletes the snapshot of the subvolume.
MetadataPool string DeleteSnapshot(ctx context.Context) error
Pool string // GetSnapshotInfo returns the snapshot info of the subvolume.
ClusterID string GetSnapshotInfo(ctx context.Context) (SnapshotInfo, error)
RequestName string // ProtectSnapshot protects the snapshot of the subvolume.
// ReservedID represents the ID reserved for a snapshot ProtectSnapshot(ctx context.Context) error
ReservedID string // UnprotectSnapshot unprotects the snapshot of the subvolume.
UnprotectSnapshot(ctx context.Context) error
// CloneSnapshot clones the snapshot of the subvolume.
CloneSnapshot(ctx context.Context, cloneVolOptions *SubVolume) error
} }
func (vo *VolumeOptions) CreateSnapshot(ctx context.Context, snapID, volID fsutil.VolumeID) error { // snapshotClient is the implementation of SnapshotClient interface.
fsa, err := vo.conn.GetFSAdmin() type snapshotClient struct {
*Snapshot // Embedded snapshot struct.
conn *util.ClusterConnection // Cluster connection.
}
// Snapshot represents a subvolume snapshot and its cluster information.
type Snapshot struct {
SnapshotID string // subvolume snapshot id.
*SubVolume // parent subvolume information.
}
// NewSnapshot creates a new snapshot client.
func NewSnapshot(conn *util.ClusterConnection, snapshotID string, vol *SubVolume) SnapshotClient {
return &snapshotClient{
Snapshot: &Snapshot{
SnapshotID: snapshotID,
SubVolume: vol,
},
conn: conn,
}
}
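Because snapshot operations are now consumed through the SnapshotClient interface, callers can be unit-tested against a stub instead of a live cluster, which is the testability goal stated in the commit message. A minimal sketch of such a stub; it is not part of this commit and would live in a test file of a package that imports core.

```go
package example // illustrative test double, not part of this commit

import (
	"context"

	"github.com/ceph/ceph-csi/internal/cephfs/core"
)

// fakeSnapshot records calls and returns canned results.
type fakeSnapshot struct {
	created, deleted, protected bool
	info                        core.SnapshotInfo
}

func (f *fakeSnapshot) CreateSnapshot(ctx context.Context) error    { f.created = true; return nil }
func (f *fakeSnapshot) DeleteSnapshot(ctx context.Context) error    { f.deleted = true; return nil }
func (f *fakeSnapshot) ProtectSnapshot(ctx context.Context) error   { f.protected = true; return nil }
func (f *fakeSnapshot) UnprotectSnapshot(ctx context.Context) error { f.protected = false; return nil }
func (f *fakeSnapshot) GetSnapshotInfo(ctx context.Context) (core.SnapshotInfo, error) {
	return f.info, nil
}
func (f *fakeSnapshot) CloneSnapshot(ctx context.Context, clone *core.SubVolume) error { return nil }

// compile-time check that the fake satisfies the new interface.
var _ core.SnapshotClient = (*fakeSnapshot)(nil)
```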
// CreateSnapshot creates a snapshot of the subvolume.
func (s *snapshotClient) CreateSnapshot(ctx context.Context) error {
fsa, err := s.conn.GetFSAdmin()
if err != nil { if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin: %s", err) log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
return err return err
} }
err = fsa.CreateSubVolumeSnapshot(vo.FsName, vo.SubvolumeGroup, string(volID), string(snapID)) err = fsa.CreateSubVolumeSnapshot(s.FsName, s.SubvolumeGroup, s.VolID, s.SnapshotID)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to create subvolume snapshot %s %s in fs %s: %s", log.ErrorLog(ctx, "failed to create subvolume snapshot %s %s in fs %s: %s",
string(snapID), string(volID), vo.FsName, err) s.SnapshotID, s.VolID, s.FsName, err)
return err return err
} }
@ -69,18 +96,19 @@ func (vo *VolumeOptions) CreateSnapshot(ctx context.Context, snapID, volID fsuti
return nil return nil
} }
func (vo *VolumeOptions) DeleteSnapshot(ctx context.Context, snapID, volID fsutil.VolumeID) error { // DeleteSnapshot deletes the snapshot of the subvolume.
fsa, err := vo.conn.GetFSAdmin() func (s *snapshotClient) DeleteSnapshot(ctx context.Context) error {
fsa, err := s.conn.GetFSAdmin()
if err != nil { if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin: %s", err) log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
return err return err
} }
err = fsa.ForceRemoveSubVolumeSnapshot(vo.FsName, vo.SubvolumeGroup, string(volID), string(snapID)) err = fsa.ForceRemoveSubVolumeSnapshot(s.FsName, s.SubvolumeGroup, s.VolID, s.SnapshotID)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to delete subvolume snapshot %s %s in fs %s: %s", log.ErrorLog(ctx, "failed to delete subvolume snapshot %s %s in fs %s: %s",
string(snapID), string(volID), vo.FsName, err) s.SnapshotID, s.VolID, s.FsName, err)
return err return err
} }
@ -95,16 +123,17 @@ type SnapshotInfo struct {
Protected string Protected string
} }
func (vo *VolumeOptions) GetSnapshotInfo(ctx context.Context, snapID, volID fsutil.VolumeID) (SnapshotInfo, error) { // GetSnapshotInfo returns the snapshot info of the subvolume.
func (s *snapshotClient) GetSnapshotInfo(ctx context.Context) (SnapshotInfo, error) {
snap := SnapshotInfo{} snap := SnapshotInfo{}
fsa, err := vo.conn.GetFSAdmin() fsa, err := s.conn.GetFSAdmin()
if err != nil { if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin: %s", err) log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
return snap, err return snap, err
} }
info, err := fsa.SubVolumeSnapshotInfo(vo.FsName, vo.SubvolumeGroup, string(volID), string(snapID)) info, err := fsa.SubVolumeSnapshotInfo(s.FsName, s.SubvolumeGroup, s.VolID, s.SnapshotID)
if err != nil { if err != nil {
if errors.Is(err, rados.ErrNotFound) { if errors.Is(err, rados.ErrNotFound) {
return snap, cerrors.ErrSnapNotFound return snap, cerrors.ErrSnapNotFound
@ -112,9 +141,9 @@ func (vo *VolumeOptions) GetSnapshotInfo(ctx context.Context, snapID, volID fsut
log.ErrorLog( log.ErrorLog(
ctx, ctx,
"failed to get subvolume snapshot info %s %s in fs %s with error %s", "failed to get subvolume snapshot info %s %s in fs %s with error %s",
string(volID), s.VolID,
string(snapID), s.SnapshotID,
vo.FsName, s.FsName,
err) err)
return snap, err return snap, err
@ -126,21 +155,21 @@ func (vo *VolumeOptions) GetSnapshotInfo(ctx context.Context, snapID, volID fsut
return snap, nil return snap, nil
} }
func (vo *VolumeOptions) ProtectSnapshot(ctx context.Context, snapID, volID fsutil.VolumeID) error { // ProtectSnapshot protects the snapshot of the subvolume.
func (s *snapshotClient) ProtectSnapshot(ctx context.Context) error {
// If "snapshot-autoprotect" feature is present, The ProtectSnapshot // If "snapshot-autoprotect" feature is present, The ProtectSnapshot
// call should be treated as a no-op. // call should be treated as a no-op.
if checkSubvolumeHasFeature(autoProtect, vo.Features) { if checkSubvolumeHasFeature(autoProtect, s.Features) {
return nil return nil
} }
fsa, err := vo.conn.GetFSAdmin() fsa, err := s.conn.GetFSAdmin()
if err != nil { if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin: %s", err) log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
return err return err
} }
err = fsa.ProtectSubVolumeSnapshot(vo.FsName, vo.SubvolumeGroup, string(volID), err = fsa.ProtectSubVolumeSnapshot(s.FsName, s.SubvolumeGroup, s.VolID, s.SnapshotID)
string(snapID))
if err != nil { if err != nil {
if errors.Is(err, rados.ErrObjectExists) { if errors.Is(err, rados.ErrObjectExists) {
return nil return nil
@ -148,9 +177,9 @@ func (vo *VolumeOptions) ProtectSnapshot(ctx context.Context, snapID, volID fsut
log.ErrorLog( log.ErrorLog(
ctx, ctx,
"failed to protect subvolume snapshot %s %s in fs %s with error: %s", "failed to protect subvolume snapshot %s %s in fs %s with error: %s",
string(volID), s.VolID,
string(snapID), s.SnapshotID,
vo.FsName, s.FsName,
err) err)
return err return err
@ -159,21 +188,22 @@ func (vo *VolumeOptions) ProtectSnapshot(ctx context.Context, snapID, volID fsut
return nil return nil
} }
func (vo *VolumeOptions) UnprotectSnapshot(ctx context.Context, snapID, volID fsutil.VolumeID) error { // UnprotectSnapshot unprotects the snapshot of the subvolume.
func (s *snapshotClient) UnprotectSnapshot(ctx context.Context) error {
// If "snapshot-autoprotect" feature is present, The UnprotectSnapshot // If "snapshot-autoprotect" feature is present, The UnprotectSnapshot
// call should be treated as a no-op. // call should be treated as a no-op.
if checkSubvolumeHasFeature(autoProtect, vo.Features) { if checkSubvolumeHasFeature(autoProtect, s.Features) {
return nil return nil
} }
fsa, err := vo.conn.GetFSAdmin() fsa, err := s.conn.GetFSAdmin()
if err != nil { if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin: %s", err) log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
return err return err
} }
err = fsa.UnprotectSubVolumeSnapshot(vo.FsName, vo.SubvolumeGroup, string(volID), err = fsa.UnprotectSubVolumeSnapshot(s.FsName, s.SubvolumeGroup, s.VolID,
string(snapID)) s.SnapshotID)
if err != nil { if err != nil {
// In case the snap is already unprotected we get ErrSnapProtectionExist error code // In case the snap is already unprotected we get ErrSnapProtectionExist error code
// in that case we are safe and we could discard this error. // in that case we are safe and we could discard this error.
@ -183,9 +213,9 @@ func (vo *VolumeOptions) UnprotectSnapshot(ctx context.Context, snapID, volID fs
log.ErrorLog( log.ErrorLog(
ctx, ctx,
"failed to unprotect subvolume snapshot %s %s in fs %s with error: %s", "failed to unprotect subvolume snapshot %s %s in fs %s with error: %s",
string(volID), s.VolID,
string(snapID), s.SnapshotID,
vo.FsName, s.FsName,
err) err)
return err return err
@ -194,33 +224,33 @@ func (vo *VolumeOptions) UnprotectSnapshot(ctx context.Context, snapID, volID fs
return nil return nil
} }
func (vo *VolumeOptions) cloneSnapshot( // CloneSnapshot clones the snapshot of the subvolume.
func (s *snapshotClient) CloneSnapshot(
ctx context.Context, ctx context.Context,
volID, snapID, cloneID fsutil.VolumeID, cloneSubVol *SubVolume,
cloneVolOptions *VolumeOptions,
) error { ) error {
fsa, err := vo.conn.GetFSAdmin() fsa, err := s.conn.GetFSAdmin()
if err != nil { if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin: %s", err) log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
return err return err
} }
co := &admin.CloneOptions{ co := &admin.CloneOptions{
TargetGroup: cloneVolOptions.SubvolumeGroup, TargetGroup: cloneSubVol.SubvolumeGroup,
} }
if cloneVolOptions.Pool != "" { if cloneSubVol.Pool != "" {
co.PoolLayout = cloneVolOptions.Pool co.PoolLayout = cloneSubVol.Pool
} }
err = fsa.CloneSubVolumeSnapshot(vo.FsName, vo.SubvolumeGroup, string(volID), string(snapID), string(cloneID), co) err = fsa.CloneSubVolumeSnapshot(s.FsName, s.SubvolumeGroup, s.VolID, s.SnapshotID, cloneSubVol.VolID, co)
if err != nil { if err != nil {
log.ErrorLog( log.ErrorLog(
ctx, ctx,
"failed to clone subvolume snapshot %s %s in fs %s with error: %s", "failed to clone subvolume snapshot %s %s in fs %s with error: %s",
string(volID), s.VolID,
string(snapID), s.SnapshotID,
string(cloneID), cloneSubVol.VolID,
vo.FsName, s.FsName,
err) err)
if errors.Is(err, rados.ErrNotFound) { if errors.Is(err, rados.ErrNotFound) {
return cerrors.ErrVolumeNotFound return cerrors.ErrVolumeNotFound
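CloneSnapshot builds the go-ceph admin.CloneOptions from the destination SubVolume, so the target group and an optional pool layout travel with the clone definition. A hedged caller-side sketch; the field values are illustrative and the connection is assumed to be an established util.ClusterConnection.

```go
func cloneIntoPool(
	ctx context.Context,
	conn *util.ClusterConnection,
	parent *core.SubVolume,
	snapshotID, cloneName, pool string,
) error {
	snapClient := core.NewSnapshot(conn, snapshotID, parent)

	// the destination SubVolume supplies TargetGroup and PoolLayout for the
	// underlying admin.CloneOptions.
	clone := &core.SubVolume{
		VolID:          cloneName,
		FsName:         parent.FsName,
		SubvolumeGroup: parent.SubvolumeGroup,
		Pool:           pool, // leave empty to keep the default layout
		Size:           parent.Size,
	}

	return snapClient.CloneSnapshot(ctx, clone)
}
```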


@ -53,20 +53,75 @@ type Subvolume struct {
Features []string Features []string
} }
// SubVolumeClient is the interface that holds the signature of subvolume methods
// that interacts with CephFS subvolume API's.
type SubVolumeClient interface {
// GetVolumeRootPathCeph returns the root path of the subvolume.
GetVolumeRootPathCeph(ctx context.Context) (string, error)
// CreateVolume creates a subvolume.
CreateVolume(ctx context.Context) error
// GetSubVolumeInfo returns the subvolume information.
GetSubVolumeInfo(ctx context.Context) (*Subvolume, error)
// ExpandVolume expands the volume if the requested size is greater than
// the subvolume size.
ExpandVolume(ctx context.Context, bytesQuota int64) error
// ResizeVolume resizes the volume.
ResizeVolume(ctx context.Context, bytesQuota int64) error
// PurgSubVolume removes the subvolume.
PurgeVolume(ctx context.Context, force bool) error
// CreateCloneFromSubVolume creates a clone from the subvolume.
CreateCloneFromSubvolume(ctx context.Context, parentvolOpt *SubVolume) error
// GetCloneState returns the clone state of the subvolume.
GetCloneState(ctx context.Context) (cephFSCloneState, error)
// CreateCloneFromSnapshot creates a clone from the subvolume snapshot.
CreateCloneFromSnapshot(ctx context.Context, snap Snapshot) error
// CleanupSnapshotFromSubvolume removes the snapshot from the subvolume.
CleanupSnapshotFromSubvolume(ctx context.Context, parentVol *SubVolume) error
}
// subVolumeClient implements SubVolumeClient interface.
type subVolumeClient struct {
*SubVolume // Embedded SubVolume struct.
clusterID string // Cluster ID to check subvolumegroup and resize functionality.
conn *util.ClusterConnection // Cluster connection.
}
// SubVolume holds the information about the subvolume.
type SubVolume struct {
VolID string // subvolume id.
FsName string // filesystem name.
SubvolumeGroup string // subvolume group name where subvolume will be created.
Pool string // pool name where subvolume will be created.
Features []string // subvolume features.
Size int64 // subvolume size.
}
// NewSubVolume returns a new subvolume client.
func NewSubVolume(conn *util.ClusterConnection, vol *SubVolume, clusterID string) SubVolumeClient {
return &subVolumeClient{
SubVolume: vol,
clusterID: clusterID,
conn: conn,
}
}
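With the new SubVolume struct the client no longer needs a full VolumeOptions; the id, filesystem, group, pool and quota are enough. An illustrative sketch of building one directly, for example in a unit test or a small tool; all field values here are made up.

```go
func createExampleSubvolume(ctx context.Context, conn *util.ClusterConnection, clusterID string) error {
	sv := core.SubVolume{
		VolID:          "csi-vol-example",      // illustrative subvolume name
		FsName:         "myfs",                 // illustrative filesystem
		SubvolumeGroup: "csi",                  // illustrative subvolume group
		Size:           1 * 1024 * 1024 * 1024, // 1 GiB quota
	}

	volClient := core.NewSubVolume(conn, &sv, clusterID)
	if err := volClient.CreateVolume(ctx); err != nil {
		return err
	}

	rootPath, err := volClient.GetVolumeRootPathCeph(ctx)
	if err != nil {
		return err
	}
	_ = rootPath // the controller stores this into VolumeOptions.RootPath

	return nil
}
```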
// GetVolumeRootPathCephDeprecated returns the root path of the subvolume.
func GetVolumeRootPathCephDeprecated(volID fsutil.VolumeID) string { func GetVolumeRootPathCephDeprecated(volID fsutil.VolumeID) string {
return path.Join("/", "csi-volumes", string(volID)) return path.Join("/", "csi-volumes", string(volID))
} }
func (vo *VolumeOptions) GetVolumeRootPathCeph(ctx context.Context, volID fsutil.VolumeID) (string, error) { // GetVolumeRootPathCeph returns the root path of the subvolume.
fsa, err := vo.conn.GetFSAdmin() func (s *subVolumeClient) GetVolumeRootPathCeph(ctx context.Context) (string, error) {
fsa, err := s.conn.GetFSAdmin()
if err != nil { if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin err %s", err) log.ErrorLog(ctx, "could not get FSAdmin err %s", err)
return "", err return "", err
} }
svPath, err := fsa.SubVolumePath(vo.FsName, vo.SubvolumeGroup, string(volID)) svPath, err := fsa.SubVolumePath(s.FsName, s.SubvolumeGroup, s.VolID)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to get the rootpath for the vol %s: %s", string(volID), err) log.ErrorLog(ctx, "failed to get the rootpath for the vol %s: %s", s.VolID, err)
if errors.Is(err, rados.ErrNotFound) { if errors.Is(err, rados.ErrNotFound) {
return "", util.JoinErrors(cerrors.ErrVolumeNotFound, err) return "", util.JoinErrors(cerrors.ErrVolumeNotFound, err)
} }
@ -77,17 +132,18 @@ func (vo *VolumeOptions) GetVolumeRootPathCeph(ctx context.Context, volID fsutil
return svPath, nil return svPath, nil
} }
func (vo *VolumeOptions) GetSubVolumeInfo(ctx context.Context, volID fsutil.VolumeID) (*Subvolume, error) { // GetSubVolumeInfo returns the subvolume information.
fsa, err := vo.conn.GetFSAdmin() func (s *subVolumeClient) GetSubVolumeInfo(ctx context.Context) (*Subvolume, error) {
fsa, err := s.conn.GetFSAdmin()
if err != nil { if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s:", vo.FsName, err) log.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s:", s.FsName, err)
return nil, err return nil, err
} }
info, err := fsa.SubVolumeInfo(vo.FsName, vo.SubvolumeGroup, string(volID)) info, err := fsa.SubVolumeInfo(s.FsName, s.SubvolumeGroup, s.VolID)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to get subvolume info for the vol %s: %s", string(volID), err) log.ErrorLog(ctx, "failed to get subvolume info for the vol %s: %s", s.VolID, err)
if errors.Is(err, rados.ErrNotFound) { if errors.Is(err, rados.ErrNotFound) {
return nil, cerrors.ErrVolumeNotFound return nil, cerrors.ErrVolumeNotFound
} }
@ -111,7 +167,7 @@ func (vo *VolumeOptions) GetSubVolumeInfo(ctx context.Context, volID fsutil.Volu
// or nil (in case the subvolume is in snapshot-retained state), // or nil (in case the subvolume is in snapshot-retained state),
// just continue without returning quota information. // just continue without returning quota information.
if !(info.BytesQuota == fsAdmin.Infinite || info.State == fsAdmin.StateSnapRetained) { if !(info.BytesQuota == fsAdmin.Infinite || info.State == fsAdmin.StateSnapRetained) {
return nil, fmt.Errorf("subvolume %s has unsupported quota: %v", string(volID), info.BytesQuota) return nil, fmt.Errorf("subvolume %s has unsupported quota: %v", s.VolID, info.BytesQuota)
} }
} else { } else {
subvol.BytesQuota = int64(bc) subvol.BytesQuota = int64(bc)
@@ -140,50 +196,52 @@ type localClusterState struct {
subVolumeGroupCreated bool
}
-func CreateVolume(ctx context.Context, volOptions *VolumeOptions, volID fsutil.VolumeID, bytesQuota int64) error {
-// verify if corresponding ClusterID key is present in the map,
+// CreateVolume creates a subvolume.
+func (s *subVolumeClient) CreateVolume(ctx context.Context) error {
+// verify if corresponding clusterID key is present in the map,
// and if not, initialize with default values(false).
-if _, keyPresent := clusterAdditionalInfo[volOptions.ClusterID]; !keyPresent {
-clusterAdditionalInfo[volOptions.ClusterID] = &localClusterState{}
+if _, keyPresent := clusterAdditionalInfo[s.clusterID]; !keyPresent {
+clusterAdditionalInfo[s.clusterID] = &localClusterState{}
}
-ca, err := volOptions.conn.GetFSAdmin()
+ca, err := s.conn.GetFSAdmin()
if err != nil {
-log.ErrorLog(ctx, "could not get FSAdmin, can not create subvolume %s: %s", string(volID), err)
+log.ErrorLog(ctx, "could not get FSAdmin, can not create subvolume %s: %s", s.VolID, err)
return err
}
// create subvolumegroup if not already created for the cluster.
-if !clusterAdditionalInfo[volOptions.ClusterID].subVolumeGroupCreated {
+if !clusterAdditionalInfo[s.clusterID].subVolumeGroupCreated {
opts := fsAdmin.SubVolumeGroupOptions{}
-err = ca.CreateSubVolumeGroup(volOptions.FsName, volOptions.SubvolumeGroup, &opts)
+err = ca.CreateSubVolumeGroup(s.FsName, s.SubvolumeGroup, &opts)
if err != nil {
log.ErrorLog(
ctx,
"failed to create subvolume group %s, for the vol %s: %s",
-volOptions.SubvolumeGroup,
-string(volID),
+s.SubvolumeGroup,
+s.VolID,
err)
return err
}
-log.DebugLog(ctx, "cephfs: created subvolume group %s", volOptions.SubvolumeGroup)
-clusterAdditionalInfo[volOptions.ClusterID].subVolumeGroupCreated = true
+log.DebugLog(ctx, "cephfs: created subvolume group %s", s.SubvolumeGroup)
+clusterAdditionalInfo[s.clusterID].subVolumeGroupCreated = true
}
opts := fsAdmin.SubVolumeOptions{
-Size: fsAdmin.ByteCount(bytesQuota),
+Size: fsAdmin.ByteCount(s.Size),
Mode: modeAllRWX,
}
-if volOptions.Pool != "" {
-opts.PoolLayout = volOptions.Pool
+if s.Pool != "" {
+opts.PoolLayout = s.Pool
}
// FIXME: check if the right credentials are used ("-n", cephEntityClientPrefix + cr.ID)
-err = ca.CreateSubVolume(volOptions.FsName, volOptions.SubvolumeGroup, string(volID), &opts)
+err = ca.CreateSubVolume(s.FsName, s.SubvolumeGroup, s.VolID, &opts)
if err != nil {
-log.ErrorLog(ctx, "failed to create subvolume %s in fs %s: %s", string(volID), volOptions.FsName, err)
+log.ErrorLog(ctx, "failed to create subvolume %s in fs %s: %s", s.VolID, s.FsName, err)
return err
}
@@ -193,16 +251,16 @@ func CreateVolume(ctx context.Context, volOptions *VolumeOptions, volID fsutil.V
// ExpandVolume will expand the volume if the requested size is greater than
// the subvolume size.
-func (vo *VolumeOptions) ExpandVolume(ctx context.Context, volID fsutil.VolumeID, bytesQuota int64) error {
+func (s *subVolumeClient) ExpandVolume(ctx context.Context, bytesQuota int64) error {
// get the subvolume size for comparison with the requested size.
-info, err := vo.GetSubVolumeInfo(ctx, volID)
+info, err := s.GetSubVolumeInfo(ctx)
if err != nil {
return err
}
// resize if the requested size is greater than the current size.
-if vo.Size > info.BytesQuota {
-log.DebugLog(ctx, "clone %s size %d is greater than requested size %d", volID, info.BytesQuota, bytesQuota)
-err = vo.ResizeVolume(ctx, volID, bytesQuota)
+if s.Size > info.BytesQuota {
+log.DebugLog(ctx, "clone %s size %d is greater than requested size %d", s.VolID, info.BytesQuota, bytesQuota)
+err = s.ResizeVolume(ctx, bytesQuota)
}
return err
@@ -211,45 +269,47 @@ func (vo *VolumeOptions) ExpandVolume(ctx context.Context, volID fsutil.VolumeID
// ResizeVolume will try to use ceph fs subvolume resize command to resize the
// subvolume. If the command is not available as a fallback it will use
// CreateVolume to resize the subvolume.
-func (vo *VolumeOptions) ResizeVolume(ctx context.Context, volID fsutil.VolumeID, bytesQuota int64) error {
+func (s *subVolumeClient) ResizeVolume(ctx context.Context, bytesQuota int64) error {
// keyPresent checks whether corresponding clusterID key is present in clusterAdditionalInfo
var keyPresent bool
-// verify if corresponding ClusterID key is present in the map,
+// verify if corresponding clusterID key is present in the map,
// and if not, initialize with default values(false).
-if _, keyPresent = clusterAdditionalInfo[vo.ClusterID]; !keyPresent {
-clusterAdditionalInfo[vo.ClusterID] = &localClusterState{}
+if _, keyPresent = clusterAdditionalInfo[s.clusterID]; !keyPresent {
+clusterAdditionalInfo[s.clusterID] = &localClusterState{}
}
// resize subvolume when either it's supported, or when corresponding
// clusterID key was not present.
-if clusterAdditionalInfo[vo.ClusterID].resizeState == unknown ||
-clusterAdditionalInfo[vo.ClusterID].resizeState == supported {
-fsa, err := vo.conn.GetFSAdmin()
+if clusterAdditionalInfo[s.clusterID].resizeState == unknown ||
+clusterAdditionalInfo[s.clusterID].resizeState == supported {
+fsa, err := s.conn.GetFSAdmin()
if err != nil {
-log.ErrorLog(ctx, "could not get FSAdmin, can not resize volume %s:", vo.FsName, err)
+log.ErrorLog(ctx, "could not get FSAdmin, can not resize volume %s:", s.FsName, err)
return err
}
-_, err = fsa.ResizeSubVolume(vo.FsName, vo.SubvolumeGroup, string(volID), fsAdmin.ByteCount(bytesQuota), true)
+_, err = fsa.ResizeSubVolume(s.FsName, s.SubvolumeGroup, s.VolID, fsAdmin.ByteCount(bytesQuota), true)
if err == nil {
-clusterAdditionalInfo[vo.ClusterID].resizeState = supported
+clusterAdditionalInfo[s.clusterID].resizeState = supported
return nil
}
var invalid fsAdmin.NotImplementedError
// In case the error is other than invalid command return error to the caller.
if !errors.As(err, &invalid) {
-log.ErrorLog(ctx, "failed to resize subvolume %s in fs %s: %s", string(volID), vo.FsName, err)
+log.ErrorLog(ctx, "failed to resize subvolume %s in fs %s: %s", s.VolID, s.FsName, err)
return err
}
}
-clusterAdditionalInfo[vo.ClusterID].resizeState = unsupported
-return CreateVolume(ctx, vo, volID, bytesQuota)
+clusterAdditionalInfo[s.clusterID].resizeState = unsupported
+s.Size = bytesQuota
+return s.CreateVolume(ctx)
}
-func (vo *VolumeOptions) PurgeVolume(ctx context.Context, volID fsutil.VolumeID, force bool) error {
-fsa, err := vo.conn.GetFSAdmin()
+// PurgeVolume removes the subvolume.
+func (s *subVolumeClient) PurgeVolume(ctx context.Context, force bool) error {
+fsa, err := s.conn.GetFSAdmin()
if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin %s:", err)
@@ -259,13 +319,13 @@ func (vo *VolumeOptions) PurgeVolume(ctx context.Context, volID fsutil.VolumeID,
opt := fsAdmin.SubVolRmFlags{}
opt.Force = force
-if checkSubvolumeHasFeature("snapshot-retention", vo.Features) {
+if checkSubvolumeHasFeature("snapshot-retention", s.Features) {
opt.RetainSnapshots = true
}
-err = fsa.RemoveSubVolumeWithFlags(vo.FsName, vo.SubvolumeGroup, string(volID), opt)
+err = fsa.RemoveSubVolumeWithFlags(s.FsName, s.SubvolumeGroup, s.VolID, opt)
if err != nil {
-log.ErrorLog(ctx, "failed to purge subvolume %s in fs %s: %s", string(volID), vo.FsName, err)
+log.ErrorLog(ctx, "failed to purge subvolume %s in fs %s: %s", s.VolID, s.FsName, err)
if strings.Contains(err.Error(), cerrors.VolumeNotEmpty) {
return util.JoinErrors(cerrors.ErrVolumeHasSnapshots, err)
}
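For readers following the refactor, here is a minimal sketch of how a caller outside the core package might drive the new per-subvolume client, assembled only from the constructor and method calls visible in this diff (core.NewSubVolume, CreateVolume, ExpandVolume, ResizeVolume, PurgeVolume, GetConnection). The SubVolumeClient interface name, the example package and the createAndGrow helper are assumptions for illustration, not code from this commit.

```go
package example

import (
	"context"

	"github.com/ceph/ceph-csi/internal/cephfs/core"
	"github.com/ceph/ceph-csi/internal/cephfs/store"
)

// SubVolumeClient is a hypothetical view of the subvolume operations that
// this commit moves from *VolumeOptions onto a dedicated client value.
type SubVolumeClient interface {
	CreateVolume(ctx context.Context) error
	ExpandVolume(ctx context.Context, bytesQuota int64) error
	ResizeVolume(ctx context.Context, bytesQuota int64) error
	PurgeVolume(ctx context.Context, force bool) error
}

// createAndGrow creates a subvolume and then grows it to 2 GiB if it is
// currently smaller. Error handling is trimmed to keep the sketch short.
func createAndGrow(ctx context.Context, volOptions *store.VolumeOptions) error {
	// The constructor takes the cluster connection, the embedded SubVolume
	// metadata and the cluster ID, mirroring the controller-server usage.
	vol := core.NewSubVolume(volOptions.GetConnection(), &volOptions.SubVolume, volOptions.ClusterID)

	if err := vol.CreateVolume(ctx); err != nil {
		return err
	}

	return vol.ExpandVolume(ctx, 2<<30)
}
```

Keeping the quota and group bookkeeping behind a client value is what lets a unit test substitute a fake that satisfies the same method set.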


@@ -17,8 +17,8 @@ limitations under the License.
package cephfs
import (
-"github.com/ceph/ceph-csi/internal/cephfs/core"
"github.com/ceph/ceph-csi/internal/cephfs/mounter"
+"github.com/ceph/ceph-csi/internal/cephfs/store"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/journal"
@@ -87,9 +87,9 @@ func (fs *Driver) Run(conf *util.Config) {
CSIInstanceID = conf.InstanceID
}
// Create an instance of the volume journal
-core.VolJournal = journal.NewCSIVolumeJournalWithNamespace(CSIInstanceID, fsutil.RadosNamespace)
-core.SnapJournal = journal.NewCSISnapshotJournalWithNamespace(CSIInstanceID, fsutil.RadosNamespace)
+store.VolJournal = journal.NewCSIVolumeJournalWithNamespace(CSIInstanceID, fsutil.RadosNamespace)
+store.SnapJournal = journal.NewCSISnapshotJournalWithNamespace(CSIInstanceID, fsutil.RadosNamespace)
// Initialize default library driver
fs.cd = csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID)


@@ -25,7 +25,7 @@ import (
"strings"
"sync"
-"github.com/ceph/ceph-csi/internal/cephfs/core"
+"github.com/ceph/ceph-csi/internal/cephfs/store"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
)
@@ -47,7 +47,7 @@ var (
type FuseMounter struct{}
-func mountFuse(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *core.VolumeOptions) error {
+func mountFuse(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *store.VolumeOptions) error {
args := []string{
mountPoint,
"-m", volOptions.Monitors,
@@ -99,7 +99,7 @@ func (m *FuseMounter) Mount(
ctx context.Context,
mountPoint string,
cr *util.Credentials,
-volOptions *core.VolumeOptions) error {
+volOptions *store.VolumeOptions) error {
if err := util.CreateMountPoint(mountPoint); err != nil {
return err
}


@@ -20,7 +20,7 @@ import (
"context"
"fmt"
-"github.com/ceph/ceph-csi/internal/cephfs/core"
+"github.com/ceph/ceph-csi/internal/cephfs/store"
"github.com/ceph/ceph-csi/internal/util"
)
@@ -31,7 +31,7 @@ const (
type KernelMounter struct{}
-func mountKernel(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *core.VolumeOptions) error {
+func mountKernel(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *store.VolumeOptions) error {
if err := execCommandErr(ctx, "modprobe", "ceph"); err != nil {
return err
}
@@ -63,7 +63,7 @@ func (m *KernelMounter) Mount(
ctx context.Context,
mountPoint string,
cr *util.Credentials,
-volOptions *core.VolumeOptions) error {
+volOptions *store.VolumeOptions) error {
if err := util.CreateMountPoint(mountPoint); err != nil {
return err
}


@@ -23,7 +23,7 @@ import (
"os/exec"
"strings"
-"github.com/ceph/ceph-csi/internal/cephfs/core"
+"github.com/ceph/ceph-csi/internal/cephfs/store"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
)
@@ -99,11 +99,11 @@ func LoadAvailableMounters(conf *util.Config) error {
}
type VolumeMounter interface {
-Mount(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *core.VolumeOptions) error
+Mount(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *store.VolumeOptions) error
Name() string
}
-func New(volOptions *core.VolumeOptions) (VolumeMounter, error) {
+func New(volOptions *store.VolumeOptions) (VolumeMounter, error) {
// Get the mounter from the configuration
wantMounter := volOptions.Mounter
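The mounter hunks above only swap the option type. As a usage note, a node-plugin caller would now pass store.VolumeOptions straight through, roughly as in this sketch; the stageVolume helper, its arguments and the error handling are illustrative, only mounter.New and Mount come from this file.

```go
package example

import (
	"context"

	"github.com/ceph/ceph-csi/internal/cephfs/mounter"
	"github.com/ceph/ceph-csi/internal/cephfs/store"
	"github.com/ceph/ceph-csi/internal/util"
)

// stageVolume mounts the CephFS subvolume at stagingPath. The staging path
// and credentials are supplied by the CSI node-server in the real code.
func stageVolume(ctx context.Context, volOptions *store.VolumeOptions, cr *util.Credentials, stagingPath string) error {
	// New selects the kernel or FUSE mounter based on volOptions.Mounter.
	m, err := mounter.New(volOptions)
	if err != nil {
		return err
	}

	// Both mounter implementations now accept *store.VolumeOptions.
	return m.Mount(ctx, stagingPath, cr, volOptions)
}
```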


@@ -23,9 +23,9 @@ import (
"os"
"strings"
-"github.com/ceph/ceph-csi/internal/cephfs/core"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
"github.com/ceph/ceph-csi/internal/cephfs/mounter"
+"github.com/ceph/ceph-csi/internal/cephfs/store"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/util"
@@ -46,7 +46,7 @@ type NodeServer struct {
}
func getCredentialsForVolume(
-volOptions *core.VolumeOptions,
+volOptions *store.VolumeOptions,
req *csi.NodeStageVolumeRequest) (*util.Credentials, error) {
var (
err error
@@ -77,7 +77,7 @@ func getCredentialsForVolume(
func (ns *NodeServer) NodeStageVolume(
ctx context.Context,
req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
-var volOptions *core.VolumeOptions
+var volOptions *store.VolumeOptions
if err := util.ValidateNodeStageVolumeRequest(req); err != nil {
return nil, err
}
@@ -94,21 +94,21 @@ func (ns *NodeServer) NodeStageVolume(
}
defer ns.VolumeLocks.Release(req.GetVolumeId())
-volOptions, _, err := core.NewVolumeOptionsFromVolID(ctx, string(volID), req.GetVolumeContext(), req.GetSecrets())
+volOptions, _, err := store.NewVolumeOptionsFromVolID(ctx, string(volID), req.GetVolumeContext(), req.GetSecrets())
if err != nil {
if !errors.Is(err, cerrors.ErrInvalidVolID) {
return nil, status.Error(codes.Internal, err.Error())
}
// gets mon IPs from the supplied cluster info
-volOptions, _, err = core.NewVolumeOptionsFromStaticVolume(string(volID), req.GetVolumeContext())
+volOptions, _, err = store.NewVolumeOptionsFromStaticVolume(string(volID), req.GetVolumeContext())
if err != nil {
if !errors.Is(err, cerrors.ErrNonStaticVolume) {
return nil, status.Error(codes.Internal, err.Error())
}
// get mon IPs from the volume context
-volOptions, _, err = core.NewVolumeOptionsFromMonitorList(string(volID), req.GetVolumeContext(),
+volOptions, _, err = store.NewVolumeOptionsFromMonitorList(string(volID), req.GetVolumeContext(),
req.GetSecrets())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
@@ -142,7 +142,7 @@ func (ns *NodeServer) NodeStageVolume(
return &csi.NodeStageVolumeResponse{}, nil
}
-func (*NodeServer) mount(ctx context.Context, volOptions *core.VolumeOptions, req *csi.NodeStageVolumeRequest) error {
+func (*NodeServer) mount(ctx context.Context, volOptions *store.VolumeOptions, req *csi.NodeStageVolumeRequest) error {
stagingTargetPath := req.GetStagingTargetPath()
volID := fsutil.VolumeID(req.GetVolumeId())
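The node server resolves its VolumeOptions through a chain of fallbacks: journal-backed volume ID first, then static volume, then the monitor list from the volume context. The helper below restates that order as a standalone sketch; the resolveVolumeOptions name is mine, while the three constructors and the error sentinels are the ones used in the hunk above.

```go
package example

import (
	"context"
	"errors"

	cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
	"github.com/ceph/ceph-csi/internal/cephfs/store"
)

// resolveVolumeOptions mirrors the fallback order used in NodeStageVolume.
func resolveVolumeOptions(
	ctx context.Context,
	volID string,
	volContext, secrets map[string]string) (*store.VolumeOptions, error) {
	// Normal path: the volume ID was provisioned by this driver.
	volOptions, _, err := store.NewVolumeOptionsFromVolID(ctx, volID, volContext, secrets)
	if err == nil {
		return volOptions, nil
	}
	if !errors.Is(err, cerrors.ErrInvalidVolID) {
		return nil, err
	}

	// Pre-provisioned (static) volume described entirely by the volume context.
	volOptions, _, err = store.NewVolumeOptionsFromStaticVolume(volID, volContext)
	if err == nil {
		return volOptions, nil
	}
	if !errors.Is(err, cerrors.ErrNonStaticVolume) {
		return nil, err
	}

	// Last resort: monitor list passed in the volume context.
	volOptions, _, err = store.NewVolumeOptionsFromMonitorList(volID, volContext, secrets)

	return volOptions, err
}
```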


@@ -14,13 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package core
+package store
import (
"context"
"errors"
"fmt"
+"github.com/ceph/ceph-csi/internal/cephfs/core"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
"github.com/ceph/ceph-csi/internal/journal"
@@ -95,16 +96,16 @@ func CheckVolExists(ctx context.Context,
}
imageUUID := imageData.ImageUUID
vid.FsSubvolName = imageData.ImageAttributes.ImageName
+volOptions.VolID = vid.FsSubvolName
+vol := core.NewSubVolume(volOptions.conn, &volOptions.SubVolume, volOptions.ClusterID)
if sID != nil || pvID != nil {
-cloneState, cloneStateErr := volOptions.getCloneState(ctx, fsutil.VolumeID(vid.FsSubvolName))
+cloneState, cloneStateErr := vol.GetCloneState(ctx)
if cloneStateErr != nil {
if errors.Is(cloneStateErr, cerrors.ErrVolumeNotFound) {
if pvID != nil {
-err = cleanupCloneFromSubvolumeSnapshot(
-ctx, fsutil.VolumeID(pvID.FsSubvolName),
-fsutil.VolumeID(vid.FsSubvolName),
-parentVolOpt)
+err = vol.CleanupSnapshotFromSubvolume(
+ctx, &parentVolOpt.SubVolume)
if err != nil {
return nil, err
}
@@ -117,29 +118,27 @@ func CheckVolExists(ctx context.Context,
return nil, err
}
-if cloneState == cephFSCloneInprogress {
+if cloneState == core.CephFSCloneInprogress {
return nil, cerrors.ErrCloneInProgress
}
-if cloneState == cephFSClonePending {
+if cloneState == core.CephFSClonePending {
return nil, cerrors.ErrClonePending
}
-if cloneState == cephFSCloneFailed {
+if cloneState == core.CephFSCloneFailed {
log.ErrorLog(ctx,
"clone failed, deleting subvolume clone. vol=%s, subvol=%s subvolgroup=%s",
volOptions.FsName,
vid.FsSubvolName,
volOptions.SubvolumeGroup)
-err = volOptions.PurgeVolume(ctx, fsutil.VolumeID(vid.FsSubvolName), true)
+err = vol.PurgeVolume(ctx, true)
if err != nil {
log.ErrorLog(ctx, "failed to delete volume %s: %v", vid.FsSubvolName, err)
return nil, err
}
if pvID != nil {
-err = cleanupCloneFromSubvolumeSnapshot(
-ctx, fsutil.VolumeID(pvID.FsSubvolName),
-fsutil.VolumeID(vid.FsSubvolName),
-parentVolOpt)
+err = vol.CleanupSnapshotFromSubvolume(
+ctx, &parentVolOpt.SubVolume)
if err != nil {
return nil, err
}
@@ -149,21 +148,18 @@ func CheckVolExists(ctx context.Context,
return nil, err
}
-if cloneState != cephFSCloneComplete {
+if cloneState != core.CephFSCloneComplete {
return nil, fmt.Errorf("clone is not in complete state for %s", vid.FsSubvolName)
}
}
-volOptions.RootPath, err = volOptions.GetVolumeRootPathCeph(ctx, fsutil.VolumeID(vid.FsSubvolName))
+volOptions.RootPath, err = vol.GetVolumeRootPathCeph(ctx)
if err != nil {
if errors.Is(err, cerrors.ErrVolumeNotFound) {
// If the subvolume is not present, cleanup the stale snapshot
// created for clone.
if parentVolOpt != nil && pvID != nil {
-err = cleanupCloneFromSubvolumeSnapshot(
-ctx,
-fsutil.VolumeID(pvID.FsSubvolName),
-fsutil.VolumeID(vid.FsSubvolName),
-parentVolOpt)
+err = vol.CleanupSnapshotFromSubvolume(
+ctx, &parentVolOpt.SubVolume)
if err != nil {
return nil, err
}
@@ -194,11 +190,8 @@ func CheckVolExists(ctx context.Context,
vid.VolumeID, vid.FsSubvolName, volOptions.RequestName)
if parentVolOpt != nil && pvID != nil {
-err = cleanupCloneFromSubvolumeSnapshot(
-ctx,
-fsutil.VolumeID(pvID.FsSubvolName),
-fsutil.VolumeID(vid.FsSubvolName),
-parentVolOpt)
+err = vol.CleanupSnapshotFromSubvolume(
+ctx, &parentVolOpt.SubVolume)
if err != nil {
return nil, err
}
@@ -280,7 +273,7 @@ func ReserveVol(ctx context.Context, volOptions *VolumeOptions, secret map[strin
if err != nil {
return nil, err
}
+volOptions.VolID = vid.FsSubvolName
// generate the volume ID to return to the CO system
vid.VolumeID, err = util.GenerateVolID(ctx, volOptions.Monitors, cr, volOptions.FscID,
"", volOptions.ClusterID, imageUUID, fsutil.VolIDVersion)
@@ -300,7 +293,7 @@ func ReserveSnap(
ctx context.Context,
volOptions *VolumeOptions,
parentSubVolName string,
-snap *CephfsSnapshot,
+snap *SnapshotOption,
cr *util.Credentials) (*SnapshotIdentifier, error) {
var (
vid SnapshotIdentifier
@@ -373,9 +366,8 @@ hence safe to garbage collect.
func CheckSnapExists(
ctx context.Context,
volOptions *VolumeOptions,
-parentSubVolName string,
-snap *CephfsSnapshot,
-cr *util.Credentials) (*SnapshotIdentifier, *SnapshotInfo, error) {
+snap *SnapshotOption,
+cr *util.Credentials) (*SnapshotIdentifier, *core.SnapshotInfo, error) {
// Connect to cephfs' default radosNamespace (csi)
j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
if err != nil {
@@ -384,7 +376,7 @@ func CheckSnapExists(
defer j.Destroy()
snapData, err := j.CheckReservation(
-ctx, volOptions.MetadataPool, snap.RequestName, snap.NamePrefix, parentSubVolName, "")
+ctx, volOptions.MetadataPool, snap.RequestName, snap.NamePrefix, volOptions.VolID, "")
if err != nil {
return nil, nil, err
}
@@ -395,7 +387,8 @@ func CheckSnapExists(
snapUUID := snapData.ImageUUID
snapID := snapData.ImageAttributes.ImageName
sid.FsSnapshotName = snapData.ImageAttributes.ImageName
-snapInfo, err := volOptions.GetSnapshotInfo(ctx, fsutil.VolumeID(snapID), fsutil.VolumeID(parentSubVolName))
+snapClient := core.NewSnapshot(volOptions.conn, snapID, &volOptions.SubVolume)
+snapInfo, err := snapClient.GetSnapshotInfo(ctx)
if err != nil {
if errors.Is(err, cerrors.ErrSnapNotFound) {
err = j.UndoReservation(ctx, volOptions.MetadataPool,
@@ -409,7 +402,7 @@ func CheckSnapExists(
defer func() {
if err != nil {
-err = volOptions.DeleteSnapshot(ctx, fsutil.VolumeID(snapID), fsutil.VolumeID(parentSubVolName))
+err = snapClient.DeleteSnapshot(ctx)
if err != nil {
log.ErrorLog(ctx, "failed to delete snapshot %s: %v", snapID, err)
@@ -435,7 +428,7 @@ func CheckSnapExists(
return nil, nil, err
}
log.DebugLog(ctx, "Found existing snapshot (%s) with subvolume name (%s) for request (%s)",
-snapData.ImageAttributes.RequestName, parentSubVolName, sid.FsSnapshotName)
+snapData.ImageAttributes.RequestName, volOptions.VolID, sid.FsSnapshotName)
return sid, &snapInfo, nil
}
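As a pointer to the new snapshot client used in CheckSnapExists above, here is a hedged sketch of looking up and deleting a snapshot from outside the store package. Only core.NewSnapshot, GetSnapshotInfo and DeleteSnapshot appear in this diff; the example package, the helper name and the use of GetConnection in place of the unexported conn field are illustrative glue.

```go
package example

import (
	"context"

	"github.com/ceph/ceph-csi/internal/cephfs/core"
	"github.com/ceph/ceph-csi/internal/cephfs/store"
)

// inspectAndDeleteSnapshot is illustrative only: snapName would normally come
// from the snapshot journal, as in CheckSnapExists.
func inspectAndDeleteSnapshot(ctx context.Context, volOptions *store.VolumeOptions, snapName string) error {
	// The snapshot client is bound to one snapshot of the subvolume described
	// by the embedded SubVolume metadata.
	snapClient := core.NewSnapshot(volOptions.GetConnection(), snapName, &volOptions.SubVolume)

	// Fetch the snapshot metadata first; a not-found error surfaces here.
	if _, err := snapClient.GetSnapshotInfo(ctx); err != nil {
		return err
	}

	return snapClient.DeleteSnapshot(ctx)
}
```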


@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package core
+package store
import (
"context"
@@ -25,6 +25,7 @@ import (
"github.com/container-storage-interface/spec/lib/go/csi"
+"github.com/ceph/ceph-csi/internal/cephfs/core"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
"github.com/ceph/ceph-csi/internal/util"
@@ -32,27 +33,23 @@ import (
)
type VolumeOptions struct {
+core.SubVolume
TopologyPools *[]util.TopologyConstrainedPool
TopologyRequirement *csi.TopologyRequirement
Topology map[string]string
RequestName string
NamePrefix string
-Size int64
ClusterID string
-FsName string
FscID int64
-MetadataPool string
// ReservedID represents the ID reserved for a subvolume
ReservedID string
+MetadataPool string
Monitors string `json:"monitors"`
-Pool string `json:"pool"`
RootPath string `json:"rootPath"`
Mounter string `json:"mounter"`
ProvisionVolume bool `json:"provisionVolume"`
KernelMountOptions string `json:"kernelMountOptions"`
FuseMountOptions string `json:"fuseMountOptions"`
-SubvolumeGroup string
-Features []string
// conn is a connection to the Ceph cluster obtained from a ConnPool
conn *util.ClusterConnection
@@ -180,6 +177,11 @@ func GetClusterInformation(options map[string]string) (*util.ClusterInfo, error)
return clusterData, nil
}
+// GetConnection returns the cluster connection.
+func (vo *VolumeOptions) GetConnection() *util.ClusterConnection {
+return vo.conn
+}
// NewVolumeOptions generates a new instance of volumeOptions from the provided
// CSI request parameters.
func NewVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVolumeRequest,
@@ -230,7 +232,7 @@ func NewVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVo
return nil, err
}
-fs := NewFileSystem(opts.conn)
+fs := core.NewFileSystem(opts.conn)
opts.FscID, err = fs.GetFscID(ctx, opts.FsName)
if err != nil {
return nil, err
@@ -281,6 +283,7 @@ func NewVolumeOptionsFromVolID(
}
volOptions.ClusterID = vi.ClusterID
vid.VolumeID = volID
+volOptions.VolID = volID
volOptions.FscID = vi.LocationID
if volOptions.Monitors, err = util.Mons(util.CsiConfigFile, vi.ClusterID); err != nil {
@@ -309,7 +312,7 @@ func NewVolumeOptionsFromVolID(
}
}()
-fs := NewFileSystem(volOptions.conn)
+fs := core.NewFileSystem(volOptions.conn)
volOptions.FsName, err = fs.GetFsName(ctx, volOptions.FscID)
if err != nil {
return nil, nil, err
@@ -358,8 +361,9 @@ func NewVolumeOptionsFromVolID(
}
volOptions.ProvisionVolume = true
-info, err := volOptions.GetSubVolumeInfo(ctx, fsutil.VolumeID(vid.FsSubvolName))
+volOptions.SubVolume.VolID = vid.FsSubvolName
+vol := core.NewSubVolume(volOptions.conn, &volOptions.SubVolume, volOptions.ClusterID)
+info, err := vol.GetSubVolumeInfo(ctx)
if err == nil {
volOptions.RootPath = info.Path
volOptions.Features = info.Features
@@ -367,7 +371,7 @@ func NewVolumeOptionsFromVolID(
}
if errors.Is(err, cerrors.ErrInvalidCommand) {
-volOptions.RootPath, err = volOptions.GetVolumeRootPathCeph(ctx, fsutil.VolumeID(vid.FsSubvolName))
+volOptions.RootPath, err = vol.GetVolumeRootPathCeph(ctx)
}
return &volOptions, &vid, err
@@ -410,7 +414,7 @@ func NewVolumeOptionsFromMonitorList(
return nil, nil, err
}
-opts.RootPath = GetVolumeRootPathCephDeprecated(fsutil.VolumeID(volID))
+opts.RootPath = core.GetVolumeRootPathCephDeprecated(fsutil.VolumeID(volID))
} else {
if err = extractOption(&opts.RootPath, "rootPath", options); err != nil {
return nil, nil, err
@@ -509,7 +513,7 @@ func NewVolumeOptionsFromStaticVolume(
func NewSnapshotOptionsFromID(
ctx context.Context,
snapID string,
-cr *util.Credentials) (*VolumeOptions, *SnapshotInfo, *SnapshotIdentifier, error) {
+cr *util.Credentials) (*VolumeOptions, *core.SnapshotInfo, *SnapshotIdentifier, error) {
var (
vi util.CSIIdentifier
volOptions VolumeOptions
@@ -551,7 +555,7 @@ func NewSnapshotOptionsFromID(
}
}()
-fs := NewFileSystem(volOptions.conn)
+fs := core.NewFileSystem(volOptions.conn)
volOptions.FsName, err = fs.GetFsName(ctx, volOptions.FscID)
if err != nil {
return &volOptions, nil, &sid, err
@@ -579,14 +583,17 @@ func NewSnapshotOptionsFromID(
sid.FsSnapshotName = imageAttributes.ImageName
sid.FsSubvolName = imageAttributes.SourceName
-subvolInfo, err := volOptions.GetSubVolumeInfo(ctx, fsutil.VolumeID(sid.FsSubvolName))
+volOptions.SubVolume.VolID = sid.FsSubvolName
+vol := core.NewSubVolume(volOptions.conn, &volOptions.SubVolume, volOptions.ClusterID)
+subvolInfo, err := vol.GetSubVolumeInfo(ctx)
if err != nil {
return &volOptions, nil, &sid, err
}
volOptions.Features = subvolInfo.Features
volOptions.Size = subvolInfo.BytesQuota
-info, err := volOptions.GetSnapshotInfo(ctx, fsutil.VolumeID(sid.FsSnapshotName), fsutil.VolumeID(sid.FsSubvolName))
+snap := core.NewSnapshot(volOptions.conn, sid.FsSnapshotName, &volOptions.SubVolume)
+info, err := snap.GetSnapshotInfo(ctx)
if err != nil {
return &volOptions, nil, &sid, err
}
@@ -594,8 +601,17 @@ func NewSnapshotOptionsFromID(
return &volOptions, &info, &sid, nil
}
-func GenSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (snap *CephfsSnapshot, err error) {
-cephfsSnap := &CephfsSnapshot{}
+// SnapshotOption is a struct that holds the information about the snapshot.
+type SnapshotOption struct {
+ReservedID string // ID reserved for the snapshot.
+RequestName string // Request name of the snapshot.
+ClusterID string // Cluster ID used to identify the ceph cluster connection information.
+Monitors string // Monitors of the ceph cluster.
+NamePrefix string // Name prefix of the snapshot.
+}
+func GenSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (*SnapshotOption, error) {
+cephfsSnap := &SnapshotOption{}
cephfsSnap.RequestName = req.GetName()
snapOptions := req.GetParameters()
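To round this off, here is a hedged sketch of how a controller-side caller might consume the new SnapshotOption type. GenSnapFromOptions, ReserveSnap, SnapshotIdentifier and the embedded VolID field are the names shown in this diff; the reserveSnapshot helper and the surrounding wiring are assumptions for illustration, not the driver's actual CreateSnapshot path.

```go
package example

import (
	"context"

	"github.com/container-storage-interface/spec/lib/go/csi"

	"github.com/ceph/ceph-csi/internal/cephfs/store"
	"github.com/ceph/ceph-csi/internal/util"
)

// reserveSnapshot builds a SnapshotOption from the CSI request and reserves a
// snapshot name in the journal for the parent subvolume.
func reserveSnapshot(
	ctx context.Context,
	req *csi.CreateSnapshotRequest,
	volOptions *store.VolumeOptions,
	cr *util.Credentials) (*store.SnapshotIdentifier, error) {
	// Extract request name, name prefix and cluster details from the request
	// parameters.
	snap, err := store.GenSnapFromOptions(ctx, req)
	if err != nil {
		return nil, err
	}

	// volOptions.VolID (via the embedded SubVolume) identifies the parent
	// subvolume the snapshot belongs to.
	return store.ReserveSnap(ctx, volOptions, volOptions.VolID, snap, cr)
}
```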