cleanup: move core functions to core pkg

As we are refactoring the cephfs code, move all
the core functions into a new core package under
internal/cephfs. This will make things easier to
implement: from now on, all core functionality
will be added to the core package.

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Madhu Rajanna authored 2021-09-16 19:17:57 +05:30; committed by mergify[bot]
parent 64ade1d4c3
commit b1ef842640
15 changed files with 320 additions and 285 deletions
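
For a concrete picture of the refactor before the diff: a minimal, hypothetical sketch of a call site after this change. The import paths, exported names, and signatures are taken from the diff below; the wrapper function itself is illustrative only, not part of this commit.

```go
package cephfs

import (
	"context"

	"github.com/ceph/ceph-csi/internal/cephfs/core"
	fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
)

// resolveVolumeRootPath is a hypothetical helper showing the post-refactor
// pattern: exported core.* functions and the fsutil.VolumeID type replace
// the old package-private identifiers inside the cephfs package.
func resolveVolumeRootPath(ctx context.Context, volID string, secrets map[string]string) (string, error) {
	volOptions, vid, err := core.NewVolumeOptionsFromVolID(ctx, volID, nil, secrets)
	if err != nil {
		return "", err
	}
	defer volOptions.Destroy()

	// Subvolume operations now go through exported methods on core.VolumeOptions.
	return volOptions.GetVolumeRootPathCeph(ctx, fsutil.VolumeID(vid.FsSubvolName))
}
```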

View File

@@ -21,7 +21,9 @@ import (
"errors"
"fmt"
"github.com/ceph/ceph-csi/internal/cephfs/core"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
@@ -53,11 +55,11 @@ type ControllerServer struct {
func (cs *ControllerServer) createBackingVolume(
ctx context.Context,
volOptions,
parentVolOpt *volumeOptions,
parentVolOpt *core.VolumeOptions,
vID,
pvID *volumeIdentifier,
sID *snapshotIdentifier) error {
pvID *core.VolumeIdentifier,
sID *core.SnapshotIdentifier) error {
var err error
if sID != nil {
if err = cs.OperationLocks.GetRestoreLock(sID.SnapshotID); err != nil {
@@ -67,7 +69,7 @@ func (cs *ControllerServer) createBackingVolume(
}
defer cs.OperationLocks.ReleaseRestoreLock(sID.SnapshotID)
err = createCloneFromSnapshot(ctx, parentVolOpt, volOptions, vID, sID)
err = core.CreateCloneFromSnapshot(ctx, parentVolOpt, volOptions, vID, sID)
if err != nil {
log.ErrorLog(ctx, "failed to create clone from snapshot %s: %v", sID.FsSnapshotName, err)
@@ -83,14 +85,14 @@ func (cs *ControllerServer) createBackingVolume(
return status.Error(codes.Aborted, err.Error())
}
defer cs.OperationLocks.ReleaseCloneLock(pvID.VolumeID)
err = createCloneFromSubvolume(
err = core.CreateCloneFromSubvolume(
ctx,
volumeID(pvID.FsSubvolName),
volumeID(vID.FsSubvolName),
fsutil.VolumeID(pvID.FsSubvolName),
fsutil.VolumeID(vID.FsSubvolName),
volOptions,
parentVolOpt)
if err != nil {
log.ErrorLog(ctx, "failed to create clone from subvolume %s: %v", volumeID(pvID.FsSubvolName), err)
log.ErrorLog(ctx, "failed to create clone from subvolume %s: %v", fsutil.VolumeID(pvID.FsSubvolName), err)
return err
}
@@ -98,7 +100,7 @@ func (cs *ControllerServer) createBackingVolume(
return nil
}
if err = createVolume(ctx, volOptions, volumeID(vID.FsSubvolName), volOptions.Size); err != nil {
if err = core.CreateVolume(ctx, volOptions, fsutil.VolumeID(vID.FsSubvolName), volOptions.Size); err != nil {
log.ErrorLog(ctx, "failed to create volume %s: %v", volOptions.RequestName, err)
return status.Error(codes.Internal, err.Error())
@@ -110,7 +112,7 @@ func (cs *ControllerServer) createBackingVolume(
func checkContentSource(
ctx context.Context,
req *csi.CreateVolumeRequest,
cr *util.Credentials) (*volumeOptions, *volumeIdentifier, *snapshotIdentifier, error) {
cr *util.Credentials) (*core.VolumeOptions, *core.VolumeIdentifier, *core.SnapshotIdentifier, error) {
if req.VolumeContentSource == nil {
return nil, nil, nil, nil
}
@@ -118,7 +120,7 @@ func checkContentSource(
switch volumeSource.Type.(type) {
case *csi.VolumeContentSource_Snapshot:
snapshotID := req.VolumeContentSource.GetSnapshot().GetSnapshotId()
volOpt, _, sid, err := newSnapshotOptionsFromID(ctx, snapshotID, cr)
volOpt, _, sid, err := core.NewSnapshotOptionsFromID(ctx, snapshotID, cr)
if err != nil {
if errors.Is(err, cerrors.ErrSnapNotFound) {
return nil, nil, nil, status.Error(codes.NotFound, err.Error())
@@ -131,7 +133,7 @@ func checkContentSource(
case *csi.VolumeContentSource_Volume:
// Find the volume using the provided VolumeID
volID := req.VolumeContentSource.GetVolume().GetVolumeId()
parentVol, pvID, err := newVolumeOptionsFromVolID(ctx, volID, nil, req.Secrets)
parentVol, pvID, err := core.NewVolumeOptionsFromVolID(ctx, volID, nil, req.Secrets)
if err != nil {
if !errors.Is(err, cerrors.ErrVolumeNotFound) {
return nil, nil, nil, status.Error(codes.NotFound, err.Error())
@@ -177,7 +179,7 @@ func (cs *ControllerServer) CreateVolume(
}
defer cs.VolumeLocks.Release(requestName)
volOptions, err := newVolumeOptions(ctx, requestName, req, cr)
volOptions, err := core.NewVolumeOptions(ctx, requestName, req, cr)
if err != nil {
log.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)
@@ -197,9 +199,9 @@ func (cs *ControllerServer) CreateVolume(
defer parentVol.Destroy()
}
vID, err := checkVolExists(ctx, volOptions, parentVol, pvID, sID, cr)
vID, err := core.CheckVolExists(ctx, volOptions, parentVol, pvID, sID, cr)
if err != nil {
if isCloneRetryError(err) {
if cerrors.IsCloneRetryError(err) {
return nil, status.Error(codes.Aborted, err.Error())
}
@@ -230,15 +232,15 @@ func (cs *ControllerServer) CreateVolume(
}
// Reservation
vID, err = reserveVol(ctx, volOptions, secret)
vID, err = core.ReserveVol(ctx, volOptions, secret)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
defer func() {
if err != nil {
if !isCloneRetryError(err) {
errDefer := undoVolReservation(ctx, volOptions, *vID, secret)
if !cerrors.IsCloneRetryError(err) {
errDefer := core.UndoVolReservation(ctx, volOptions, *vID, secret)
if errDefer != nil {
log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)",
requestName, errDefer)
@@ -250,16 +252,16 @@ func (cs *ControllerServer) CreateVolume(
// Create a volume
err = cs.createBackingVolume(ctx, volOptions, parentVol, vID, pvID, sID)
if err != nil {
if isCloneRetryError(err) {
if cerrors.IsCloneRetryError(err) {
return nil, status.Error(codes.Aborted, err.Error())
}
return nil, err
}
volOptions.RootPath, err = volOptions.getVolumeRootPathCeph(ctx, volumeID(vID.FsSubvolName))
volOptions.RootPath, err = volOptions.GetVolumeRootPathCeph(ctx, fsutil.VolumeID(vID.FsSubvolName))
if err != nil {
purgeErr := volOptions.purgeVolume(ctx, volumeID(vID.FsSubvolName), true)
purgeErr := volOptions.PurgeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), true)
if purgeErr != nil {
log.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, purgeErr)
// All errors other than ErrVolumeNotFound should return an error back to the caller
@@ -311,7 +313,7 @@ func (cs *ControllerServer) DeleteVolume(
return nil, err
}
volID := volumeID(req.GetVolumeId())
volID := fsutil.VolumeID(req.GetVolumeId())
secrets := req.GetSecrets()
// lock out parallel delete operations
@@ -331,7 +333,7 @@ func (cs *ControllerServer) DeleteVolume(
defer cs.OperationLocks.ReleaseDeleteLock(req.GetVolumeId())
// Find the volume using the provided VolumeID
volOptions, vID, err := newVolumeOptionsFromVolID(ctx, string(volID), nil, secrets)
volOptions, vID, err := core.NewVolumeOptionsFromVolID(ctx, string(volID), nil, secrets)
if err != nil {
// if the error is ErrPoolNotFound, the pool is already deleted and we don't
// need to worry about deleting the subvolume or omap data; return success
@@ -362,7 +364,7 @@ func (cs *ControllerServer) DeleteVolume(
}
defer cs.VolumeLocks.Release(volOptions.RequestName)
if err = undoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
if err = core.UndoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@@ -386,7 +388,7 @@ func (cs *ControllerServer) DeleteVolume(
}
defer cr.DeleteCredentials()
if err = volOptions.purgeVolume(ctx, volumeID(vID.FsSubvolName), false); err != nil {
if err = volOptions.PurgeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), false); err != nil {
log.ErrorLog(ctx, "failed to delete volume %s: %v", volID, err)
if errors.Is(err, cerrors.ErrVolumeHasSnapshots) {
return nil, status.Error(codes.FailedPrecondition, err.Error())
@@ -397,7 +399,7 @@ func (cs *ControllerServer) DeleteVolume(
}
}
if err := undoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
if err := core.UndoVolReservation(ctx, volOptions, *vID, secrets); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@@ -460,7 +462,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
}
defer cr.DeleteCredentials()
volOptions, volIdentifier, err := newVolumeOptionsFromVolID(ctx, volID, nil, secret)
volOptions, volIdentifier, err := core.NewVolumeOptionsFromVolID(ctx, volID, nil, secret)
if err != nil {
log.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)
@@ -470,8 +472,8 @@ func (cs *ControllerServer) ControllerExpandVolume(
RoundOffSize := util.RoundOffBytes(req.GetCapacityRange().GetRequiredBytes())
if err = volOptions.resizeVolume(ctx, volumeID(volIdentifier.FsSubvolName), RoundOffSize); err != nil {
log.ErrorLog(ctx, "failed to expand volume %s: %v", volumeID(volIdentifier.FsSubvolName), err)
if err = volOptions.ResizeVolume(ctx, fsutil.VolumeID(volIdentifier.FsSubvolName), RoundOffSize); err != nil {
log.ErrorLog(ctx, "failed to expand volume %s: %v", fsutil.VolumeID(volIdentifier.FsSubvolName), err)
return nil, status.Error(codes.Internal, err.Error())
}
@@ -497,7 +499,7 @@ func (cs *ControllerServer) CreateSnapshot(
}
defer cr.DeleteCredentials()
clusterData, err := getClusterInformation(req.GetParameters())
clusterData, err := core.GetClusterInformation(req.GetParameters())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@@ -521,7 +523,7 @@ func (cs *ControllerServer) CreateSnapshot(
defer cs.OperationLocks.ReleaseSnapshotCreateLock(sourceVolID)
// Find the volume using the provided VolumeID
parentVolOptions, vid, err := newVolumeOptionsFromVolID(ctx, sourceVolID, nil, req.GetSecrets())
parentVolOptions, vid, err := core.NewVolumeOptionsFromVolID(ctx, sourceVolID, nil, req.GetSecrets())
if err != nil {
if errors.Is(err, util.ErrPoolNotFound) {
log.WarningLog(ctx, "failed to get backend volume for %s: %v", sourceVolID, err)
@@ -545,7 +547,7 @@ func (cs *ControllerServer) CreateSnapshot(
parentVolOptions.ClusterID)
}
cephfsSnap, genSnapErr := genSnapFromOptions(ctx, req)
cephfsSnap, genSnapErr := core.GenSnapFromOptions(ctx, req)
if genSnapErr != nil {
return nil, status.Error(codes.Internal, genSnapErr.Error())
}
@@ -558,7 +560,7 @@ func (cs *ControllerServer) CreateSnapshot(
}
defer cs.VolumeLocks.Release(sourceVolID)
snapName := req.GetName()
sid, snapInfo, err := checkSnapExists(ctx, parentVolOptions, vid.FsSubvolName, cephfsSnap, cr)
sid, snapInfo, err := core.CheckSnapExists(ctx, parentVolOptions, vid.FsSubvolName, cephfsSnap, cr)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@@ -568,18 +570,18 @@ func (cs *ControllerServer) CreateSnapshot(
// as we are not able to retrieve the parent size we are rejecting the
// request to create snapshot.
// TODO: For this purpose we could make use of cached clusterAdditionalInfo too.
info, err := parentVolOptions.getSubVolumeInfo(ctx, volumeID(vid.FsSubvolName))
info, err := parentVolOptions.GetSubVolumeInfo(ctx, fsutil.VolumeID(vid.FsSubvolName))
if err != nil {
// Check the error code against ErrInvalidCommand to understand whether the cluster
// supports it or not. It's safe to evaluate this, as the filtering
// is already done from getSubVolumeInfo() and send out the error here.
// is already done from GetSubVolumeInfo() and send out the error here.
if errors.Is(err, cerrors.ErrInvalidCommand) {
return nil, status.Error(
codes.FailedPrecondition,
"subvolume info command not supported in current ceph cluster")
}
if sid != nil {
errDefer := undoSnapReservation(ctx, parentVolOptions, *sid, snapName, cr)
errDefer := core.UndoSnapReservation(ctx, parentVolOptions, *sid, snapName, cr)
if errDefer != nil {
log.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)",
requestName, errDefer)
@@ -592,8 +594,8 @@ func (cs *ControllerServer) CreateSnapshot(
if sid != nil {
// check snapshot is protected
protected := true
if !(snapInfo.Protected == snapshotIsProtected) {
err = parentVolOptions.protectSnapshot(ctx, volumeID(sid.FsSnapshotName), volumeID(vid.FsSubvolName))
if !(snapInfo.Protected == core.SnapshotIsProtected) {
err = parentVolOptions.ProtectSnapshot(ctx, fsutil.VolumeID(sid.FsSnapshotName), fsutil.VolumeID(vid.FsSubvolName))
if err != nil {
protected = false
log.WarningLog(ctx, "failed to protect snapshot of snapshot: %s (%s)",
@ -613,13 +615,13 @@ func (cs *ControllerServer) CreateSnapshot(
}
// Reservation
sID, err := reserveSnap(ctx, parentVolOptions, vid.FsSubvolName, cephfsSnap, cr)
sID, err := core.ReserveSnap(ctx, parentVolOptions, vid.FsSubvolName, cephfsSnap, cr)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
defer func() {
if err != nil {
errDefer := undoSnapReservation(ctx, parentVolOptions, *sID, snapName, cr)
errDefer := core.UndoSnapReservation(ctx, parentVolOptions, *sID, snapName, cr)
if errDefer != nil {
log.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)",
requestName, errDefer)
@@ -642,11 +644,15 @@ func (cs *ControllerServer) CreateSnapshot(
}, nil
}
func doSnapshot(ctx context.Context, volOpt *volumeOptions, subvolumeName, snapshotName string) (snapshotInfo, error) {
volID := volumeID(subvolumeName)
snapID := volumeID(snapshotName)
snap := snapshotInfo{}
err := volOpt.createSnapshot(ctx, snapID, volID)
func doSnapshot(
ctx context.Context,
volOpt *core.VolumeOptions,
subvolumeName,
snapshotName string) (core.SnapshotInfo, error) {
volID := fsutil.VolumeID(subvolumeName)
snapID := fsutil.VolumeID(snapshotName)
snap := core.SnapshotInfo{}
err := volOpt.CreateSnapshot(ctx, snapID, volID)
if err != nil {
log.ErrorLog(ctx, "failed to create snapshot %s %v", snapID, err)
@@ -654,25 +660,25 @@ func doSnapshot(ctx context.Context, volOpt *volumeOptions, subvolumeName, snaps
}
defer func() {
if err != nil {
dErr := volOpt.deleteSnapshot(ctx, snapID, volID)
dErr := volOpt.DeleteSnapshot(ctx, snapID, volID)
if dErr != nil {
log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapID, err)
}
}
}()
snap, err = volOpt.getSnapshotInfo(ctx, snapID, volID)
snap, err = volOpt.GetSnapshotInfo(ctx, snapID, volID)
if err != nil {
log.ErrorLog(ctx, "failed to get snapshot info %s %v", snapID, err)
return snap, fmt.Errorf("failed to get snapshot info for snapshot:%s", snapID)
}
var t *timestamp.Timestamp
t, err = parseTime(ctx, snap.CreatedAt)
t, err = fsutil.ParseTime(ctx, snap.CreatedAt)
if err != nil {
return snap, err
}
snap.CreationTime = t
err = volOpt.protectSnapshot(ctx, snapID, volID)
err = volOpt.ProtectSnapshot(ctx, snapID, volID)
if err != nil {
log.ErrorLog(ctx, "failed to protect snapshot %s %v", snapID, err)
}
@@ -736,7 +742,7 @@ func (cs *ControllerServer) DeleteSnapshot(
}
defer cs.OperationLocks.ReleaseDeleteLock(snapshotID)
volOpt, snapInfo, sid, err := newSnapshotOptionsFromID(ctx, snapshotID, cr)
volOpt, snapInfo, sid, err := core.NewSnapshotOptionsFromID(ctx, snapshotID, cr)
if err != nil {
switch {
case errors.Is(err, util.ErrPoolNotFound):
@@ -751,7 +757,7 @@ func (cs *ControllerServer) DeleteSnapshot(
// success as deletion is complete
return &csi.DeleteSnapshotResponse{}, nil
case errors.Is(err, cerrors.ErrSnapNotFound):
err = undoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
err = core.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
if err != nil {
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
sid.FsSubvolName, sid.FsSnapshotName, err)
@@ -764,7 +770,7 @@ func (cs *ControllerServer) DeleteSnapshot(
// if the error is ErrVolumeNotFound, the subvolume is already deleted
// from the backend; hence undo the omap entries and return success
log.ErrorLog(ctx, "Volume not present")
err = undoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
err = core.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
if err != nil {
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
sid.FsSubvolName, sid.FsSnapshotName, err)
@@ -791,17 +797,17 @@ func (cs *ControllerServer) DeleteSnapshot(
if snapInfo.HasPendingClones == "yes" {
return nil, status.Errorf(codes.FailedPrecondition, "snapshot %s has pending clones", snapshotID)
}
if snapInfo.Protected == snapshotIsProtected {
err = volOpt.unprotectSnapshot(ctx, volumeID(sid.FsSnapshotName), volumeID(sid.FsSubvolName))
if snapInfo.Protected == core.SnapshotIsProtected {
err = volOpt.UnprotectSnapshot(ctx, fsutil.VolumeID(sid.FsSnapshotName), fsutil.VolumeID(sid.FsSubvolName))
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
}
err = volOpt.deleteSnapshot(ctx, volumeID(sid.FsSnapshotName), volumeID(sid.FsSubvolName))
err = volOpt.DeleteSnapshot(ctx, fsutil.VolumeID(sid.FsSnapshotName), fsutil.VolumeID(sid.FsSubvolName))
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
err = undoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
err = core.UndoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
if err != nil {
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
sid.RequestName, sid.FsSnapshotName, err)

View File

@@ -14,13 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
package core
import (
"context"
"errors"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
"github.com/ceph/ceph-csi/internal/util/log"
)
@@ -39,8 +40,8 @@ const (
// cephFSCloneComplete indicates that clone is in complete state.
cephFSCloneComplete = cephFSCloneState("complete")
// snapshotIsProtected string indicates that the snapshot is currently protected.
snapshotIsProtected = "yes"
// SnapshotIsProtected string indicates that the snapshot is currently protected.
SnapshotIsProtected = "yes"
)
// toError checks the state of the clone if it's not cephFSCloneComplete.
@@ -61,9 +62,13 @@ func (cs cephFSCloneState) toError() error {
return nil
}
func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volOpt, parentvolOpt *volumeOptions) error {
func CreateCloneFromSubvolume(
ctx context.Context,
volID, cloneID fsutil.VolumeID,
volOpt,
parentvolOpt *VolumeOptions) error {
snapshotID := cloneID
err := parentvolOpt.createSnapshot(ctx, snapshotID, volID)
err := parentvolOpt.CreateSnapshot(ctx, snapshotID, volID)
if err != nil {
log.ErrorLog(ctx, "failed to create snapshot %s %v", snapshotID, err)
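
An aside on toError above: its body is untouched by this commit and therefore elided from the hunk. A plausible sketch of the mapping, assuming the pending/in-progress/failed states and the matching cerrors values exist as their usage elsewhere in this diff suggests:

```go
// Sketch only -- the real body is not shown in this diff. The
// cephFSClonePending, cephFSCloneInprogress and cephFSCloneFailed constants,
// and cerrors.ErrCloneFailed, are assumed to be declared alongside the
// identifiers that are visible here.
func (cs cephFSCloneState) toError() error {
	switch cs {
	case cephFSCloneComplete:
		return nil
	case cephFSClonePending:
		return cerrors.ErrClonePending
	case cephFSCloneInprogress:
		return cerrors.ErrCloneInProgress
	case cephFSCloneFailed:
		return cerrors.ErrCloneFailed
	}

	return nil
}
```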
@@ -77,17 +82,17 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
)
defer func() {
if protectErr != nil {
err = parentvolOpt.deleteSnapshot(ctx, snapshotID, volID)
err = parentvolOpt.DeleteSnapshot(ctx, snapshotID, volID)
if err != nil {
log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
}
}
if cloneErr != nil {
if err = volOpt.purgeVolume(ctx, cloneID, true); err != nil {
if err = volOpt.PurgeVolume(ctx, cloneID, true); err != nil {
log.ErrorLog(ctx, "failed to delete volume %s: %v", cloneID, err)
}
if err = parentvolOpt.unprotectSnapshot(ctx, snapshotID, volID); err != nil {
if err = parentvolOpt.UnprotectSnapshot(ctx, snapshotID, volID); err != nil {
// In case the snap is already unprotected, we get an ErrSnapProtectionExist error code;
// in that case we are safe to discard this error and can go ahead
// with deletion
@@ -95,12 +100,12 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
log.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapshotID, err)
}
}
if err = parentvolOpt.deleteSnapshot(ctx, snapshotID, volID); err != nil {
if err = parentvolOpt.DeleteSnapshot(ctx, snapshotID, volID); err != nil {
log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
}
}
}()
protectErr = parentvolOpt.protectSnapshot(ctx, snapshotID, volID)
protectErr = parentvolOpt.ProtectSnapshot(ctx, snapshotID, volID)
if protectErr != nil {
log.ErrorLog(ctx, "failed to protect snapshot %s %v", snapshotID, protectErr)
@@ -127,14 +132,14 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
return cloneState.toError()
}
// This is a workaround to fix the sizing issue for cloned images
err = volOpt.resizeVolume(ctx, cloneID, volOpt.Size)
err = volOpt.ResizeVolume(ctx, cloneID, volOpt.Size)
if err != nil {
log.ErrorLog(ctx, "failed to expand volume %s: %v", cloneID, err)
return err
}
// As we completed clone, remove the intermediate snap
if err = parentvolOpt.unprotectSnapshot(ctx, snapshotID, volID); err != nil {
if err = parentvolOpt.UnprotectSnapshot(ctx, snapshotID, volID); err != nil {
// In case the snap is already unprotected, we get an ErrSnapProtectionExist error code;
// in that case we are safe to discard this error and can go ahead
// with deletion
@@ -144,7 +149,7 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
return err
}
}
if err = parentvolOpt.deleteSnapshot(ctx, snapshotID, volID); err != nil {
if err = parentvolOpt.DeleteSnapshot(ctx, snapshotID, volID); err != nil {
log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
return err
@@ -155,12 +160,12 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
func cleanupCloneFromSubvolumeSnapshot(
ctx context.Context,
volID, cloneID volumeID,
parentVolOpt *volumeOptions) error {
volID, cloneID fsutil.VolumeID,
parentVolOpt *VolumeOptions) error {
// the snapshot name is the same as the clone name, as we need a name that can be
// identified during PVC-PVC cloning.
snapShotID := cloneID
snapInfo, err := parentVolOpt.getSnapshotInfo(ctx, snapShotID, volID)
snapInfo, err := parentVolOpt.GetSnapshotInfo(ctx, snapShotID, volID)
if err != nil {
if errors.Is(err, cerrors.ErrSnapNotFound) {
return nil
@@ -169,15 +174,15 @@ func cleanupCloneFromSubvolumeSnapshot(
return err
}
if snapInfo.Protected == snapshotIsProtected {
err = parentVolOpt.unprotectSnapshot(ctx, snapShotID, volID)
if snapInfo.Protected == SnapshotIsProtected {
err = parentVolOpt.UnprotectSnapshot(ctx, snapShotID, volID)
if err != nil {
log.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapShotID, err)
return err
}
}
err = parentVolOpt.deleteSnapshot(ctx, snapShotID, volID)
err = parentVolOpt.DeleteSnapshot(ctx, snapShotID, volID)
if err != nil {
log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapShotID, err)
@@ -187,33 +192,32 @@ func cleanupCloneFromSubvolumeSnapshot(
return nil
}
// isCloneRetryError returns true if the clone error is a pending or in-progress
// error.
func isCloneRetryError(err error) bool {
return errors.Is(err, cerrors.ErrCloneInProgress) || errors.Is(err, cerrors.ErrClonePending)
}
func createCloneFromSnapshot(
func CreateCloneFromSnapshot(
ctx context.Context,
parentVolOpt, volOptions *volumeOptions,
vID *volumeIdentifier,
sID *snapshotIdentifier) error {
snapID := volumeID(sID.FsSnapshotName)
err := parentVolOpt.cloneSnapshot(ctx, volumeID(sID.FsSubvolName), snapID, volumeID(vID.FsSubvolName), volOptions)
parentVolOpt, volOptions *VolumeOptions,
vID *VolumeIdentifier,
sID *SnapshotIdentifier) error {
snapID := fsutil.VolumeID(sID.FsSnapshotName)
err := parentVolOpt.cloneSnapshot(
ctx,
fsutil.VolumeID(sID.FsSubvolName),
snapID,
fsutil.VolumeID(vID.FsSubvolName),
volOptions)
if err != nil {
return err
}
defer func() {
if err != nil {
if !isCloneRetryError(err) {
if dErr := volOptions.purgeVolume(ctx, volumeID(vID.FsSubvolName), true); dErr != nil {
if !cerrors.IsCloneRetryError(err) {
if dErr := volOptions.PurgeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), true); dErr != nil {
log.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, dErr)
}
}
}
}()
cloneState, err := volOptions.getCloneState(ctx, volumeID(vID.FsSubvolName))
cloneState, err := volOptions.getCloneState(ctx, fsutil.VolumeID(vID.FsSubvolName))
if err != nil {
log.ErrorLog(ctx, "failed to get clone state: %v", err)
@@ -226,7 +230,7 @@ func createCloneFromSnapshot(
// The cloned volume currently does not reflect the proper size due to an issue in cephfs;
// however, this is being addressed in cephfs, and the parent volume size will then be reflected
// in the new cloned volume too. Till then we explicitly set the size
err = volOptions.resizeVolume(ctx, volumeID(vID.FsSubvolName), volOptions.Size)
err = volOptions.ResizeVolume(ctx, fsutil.VolumeID(vID.FsSubvolName), volOptions.Size)
if err != nil {
log.ErrorLog(ctx, "failed to expand volume %s with error: %v", vID.FsSubvolName, err)
@@ -236,7 +240,7 @@ func createCloneFromSnapshot(
return nil
}
func (vo *volumeOptions) getCloneState(ctx context.Context, volID volumeID) (cephFSCloneState, error) {
func (vo *VolumeOptions) getCloneState(ctx context.Context, volID fsutil.VolumeID) (cephFSCloneState, error) {
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
log.ErrorLog(

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
package core
import (
"testing"

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
package core
import (
"context"
@@ -25,7 +25,7 @@ import (
"github.com/ceph/ceph-csi/internal/util/log"
)
func (vo *volumeOptions) getFscID(ctx context.Context) (int64, error) {
func (vo *VolumeOptions) getFscID(ctx context.Context) (int64, error) {
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin, can not fetch filesystem ID for %s:", vo.FsName, err)
@@ -51,7 +51,7 @@ func (vo *volumeOptions) getFscID(ctx context.Context) (int64, error) {
return 0, cerrors.ErrVolumeNotFound
}
func (vo *volumeOptions) getMetadataPool(ctx context.Context) (string, error) {
func (vo *VolumeOptions) getMetadataPool(ctx context.Context) (string, error) {
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s:", vo.FsName, err)
@@ -75,7 +75,7 @@ func (vo *volumeOptions) getMetadataPool(ctx context.Context) (string, error) {
return "", fmt.Errorf("%w: could not find metadata pool for %s", util.ErrPoolNotFound, vo.FsName)
}
func (vo *volumeOptions) getFsName(ctx context.Context) (string, error) {
func (vo *VolumeOptions) getFsName(ctx context.Context) (string, error) {
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin, can not fetch filesystem name for ID %d:", vo.FscID, err)

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
package core
import (
"context"
@@ -22,20 +22,32 @@ import (
"fmt"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
"github.com/ceph/ceph-csi/internal/journal"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
"github.com/golang/protobuf/ptypes/timestamp"
)
// volumeIdentifier structure contains an association between the CSI VolumeID to its subvolume
var (
// VolJournal is used to maintain RADOS based journals for CO generated
// VolumeName to backing CephFS subvolumes.
VolJournal *journal.Config
// SnapJournal is used to maintain RADOS based journals for CO generated
// SnapshotName to backing CephFS subvolumes.
SnapJournal *journal.Config
)
// VolumeIdentifier structure contains an association between the CSI VolumeID to its subvolume
// name on the backing CephFS instance.
type volumeIdentifier struct {
type VolumeIdentifier struct {
FsSubvolName string
VolumeID string
}
type snapshotIdentifier struct {
type SnapshotIdentifier struct {
FsSnapshotName string
SnapshotID string
RequestName string
@@ -44,7 +56,7 @@ type snapshotIdentifier struct {
}
/*
checkVolExists checks to determine if passed in RequestName in volOptions exists on the backend.
CheckVolExists checks to determine if passed in RequestName in volOptions exists on the backend.
**NOTE:** These functions manipulate the rados omaps that hold information regarding
volume names as requested by the CSI drivers. Hence, these need to be invoked only when the
@@ -58,16 +70,16 @@ request name lock, and hence any stale omaps are leftovers from incomplete trans
hence safe to garbage collect.
*/
// nolint:gocognit,gocyclo,nestif,cyclop // TODO: reduce complexity
func checkVolExists(ctx context.Context,
func CheckVolExists(ctx context.Context,
volOptions,
parentVolOpt *volumeOptions,
parentVolOpt *VolumeOptions,
pvID *volumeIdentifier,
sID *snapshotIdentifier,
cr *util.Credentials) (*volumeIdentifier, error) {
var vid volumeIdentifier
pvID *VolumeIdentifier,
sID *SnapshotIdentifier,
cr *util.Credentials) (*VolumeIdentifier, error) {
var vid VolumeIdentifier
// Connect to cephfs' default radosNamespace (csi)
j, err := volJournal.Connect(volOptions.Monitors, radosNamespace, cr)
j, err := VolJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
if err != nil {
return nil, err
}
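
For orientation, the reserve/create/undo lifecycle that keeps these omaps consistent composes as follows. Every exported name is taken from this diff; the wrapper function is an editorial illustration (written as if inside the core package), not part of the commit:

```go
// Take the journal reservation first, then create the subvolume, and roll
// the reservation back if creation fails so a retry starts from clean omaps.
func createWithReservation(
	ctx context.Context,
	volOptions *VolumeOptions,
	secret map[string]string) (*VolumeIdentifier, error) {
	vid, err := ReserveVol(ctx, volOptions, secret)
	if err != nil {
		return nil, err
	}

	if err = CreateVolume(ctx, volOptions, fsutil.VolumeID(vid.FsSubvolName), volOptions.Size); err != nil {
		if undoErr := UndoVolReservation(ctx, volOptions, *vid, secret); undoErr != nil {
			log.WarningLog(ctx, "failed undoing reservation of volume: %v", undoErr)
		}

		return nil, err
	}

	return vid, nil
}
```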
@@ -85,13 +97,13 @@ func checkVolExists(ctx context.Context,
vid.FsSubvolName = imageData.ImageAttributes.ImageName
if sID != nil || pvID != nil {
cloneState, cloneStateErr := volOptions.getCloneState(ctx, volumeID(vid.FsSubvolName))
cloneState, cloneStateErr := volOptions.getCloneState(ctx, fsutil.VolumeID(vid.FsSubvolName))
if cloneStateErr != nil {
if errors.Is(cloneStateErr, cerrors.ErrVolumeNotFound) {
if pvID != nil {
err = cleanupCloneFromSubvolumeSnapshot(
ctx, volumeID(pvID.FsSubvolName),
volumeID(vid.FsSubvolName),
ctx, fsutil.VolumeID(pvID.FsSubvolName),
fsutil.VolumeID(vid.FsSubvolName),
parentVolOpt)
if err != nil {
return nil, err
@@ -112,7 +124,7 @@ func checkVolExists(ctx context.Context,
return nil, cerrors.ErrClonePending
}
if cloneState == cephFSCloneFailed {
err = volOptions.purgeVolume(ctx, volumeID(vid.FsSubvolName), true)
err = volOptions.PurgeVolume(ctx, fsutil.VolumeID(vid.FsSubvolName), true)
if err != nil {
log.ErrorLog(ctx, "failed to delete volume %s: %v", vid.FsSubvolName, err)
@@ -120,8 +132,8 @@ func checkVolExists(ctx context.Context,
}
if pvID != nil {
err = cleanupCloneFromSubvolumeSnapshot(
ctx, volumeID(pvID.FsSubvolName),
volumeID(vid.FsSubvolName),
ctx, fsutil.VolumeID(pvID.FsSubvolName),
fsutil.VolumeID(vid.FsSubvolName),
parentVolOpt)
if err != nil {
return nil, err
@@ -136,7 +148,7 @@ func checkVolExists(ctx context.Context,
return nil, fmt.Errorf("clone is not in complete state for %s", vid.FsSubvolName)
}
}
volOptions.RootPath, err = volOptions.getVolumeRootPathCeph(ctx, volumeID(vid.FsSubvolName))
volOptions.RootPath, err = volOptions.GetVolumeRootPathCeph(ctx, fsutil.VolumeID(vid.FsSubvolName))
if err != nil {
if errors.Is(err, cerrors.ErrVolumeNotFound) {
// If the subvolume is not present, cleanup the stale snapshot
@@ -144,8 +156,8 @@ func checkVolExists(ctx context.Context,
if parentVolOpt != nil && pvID != nil {
err = cleanupCloneFromSubvolumeSnapshot(
ctx,
volumeID(pvID.FsSubvolName),
volumeID(vid.FsSubvolName),
fsutil.VolumeID(pvID.FsSubvolName),
fsutil.VolumeID(vid.FsSubvolName),
parentVolOpt)
if err != nil {
return nil, err
@@ -168,7 +180,7 @@ func checkVolExists(ctx context.Context,
// found a volume already available, process and return it!
vid.VolumeID, err = util.GenerateVolID(ctx, volOptions.Monitors, cr, volOptions.FscID,
"", volOptions.ClusterID, imageUUID, volIDVersion)
"", volOptions.ClusterID, imageUUID, fsutil.VolIDVersion)
if err != nil {
return nil, err
}
@@ -179,8 +191,8 @@ func checkVolExists(ctx context.Context,
if parentVolOpt != nil && pvID != nil {
err = cleanupCloneFromSubvolumeSnapshot(
ctx,
volumeID(pvID.FsSubvolName),
volumeID(vid.FsSubvolName),
fsutil.VolumeID(pvID.FsSubvolName),
fsutil.VolumeID(vid.FsSubvolName),
parentVolOpt)
if err != nil {
return nil, err
@@ -190,11 +202,11 @@ func checkVolExists(ctx context.Context,
return &vid, nil
}
// undoVolReservation is a helper routine to undo a name reservation for a CSI VolumeName.
func undoVolReservation(
// UndoVolReservation is a helper routine to undo a name reservation for a CSI VolumeName.
func UndoVolReservation(
ctx context.Context,
volOptions *volumeOptions,
vid volumeIdentifier,
volOptions *VolumeOptions,
vid VolumeIdentifier,
secret map[string]string) error {
cr, err := util.NewAdminCredentials(secret)
if err != nil {
@@ -203,7 +215,7 @@ func undoVolReservation(
defer cr.DeleteCredentials()
// Connect to cephfs' default radosNamespace (csi)
j, err := volJournal.Connect(volOptions.Monitors, radosNamespace, cr)
j, err := VolJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
if err != nil {
return err
}
@@ -215,7 +227,7 @@ func undoVolReservation(
return err
}
func updateTopologyConstraints(volOpts *volumeOptions) error {
func updateTopologyConstraints(volOpts *VolumeOptions) error {
// update request based on topology constrained parameters (if present)
poolName, _, topology, err := util.FindPoolAndTopology(volOpts.TopologyPools, volOpts.TopologyRequirement)
if err != nil {
@@ -229,11 +241,11 @@ func updateTopologyConstraints(volOpts *volumeOptions) error {
return nil
}
// reserveVol is a helper routine to request a UUID reservation for the CSI VolumeName and,
// ReserveVol is a helper routine to request a UUID reservation for the CSI VolumeName and,
// to generate the volume identifier for the reserved UUID.
func reserveVol(ctx context.Context, volOptions *volumeOptions, secret map[string]string) (*volumeIdentifier, error) {
func ReserveVol(ctx context.Context, volOptions *VolumeOptions, secret map[string]string) (*VolumeIdentifier, error) {
var (
vid volumeIdentifier
vid VolumeIdentifier
imageUUID string
err error
)
@@ -250,7 +262,7 @@ func reserveVol(ctx context.Context, volOptions *volumeOptions, secret map[strin
}
// Connect to cephfs' default radosNamespace (csi)
j, err := volJournal.Connect(volOptions.Monitors, radosNamespace, cr)
j, err := VolJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
if err != nil {
return nil, err
}
@@ -266,7 +278,7 @@ func reserveVol(ctx context.Context, volOptions *volumeOptions, secret map[strin
// generate the volume ID to return to the CO system
vid.VolumeID, err = util.GenerateVolID(ctx, volOptions.Monitors, cr, volOptions.FscID,
"", volOptions.ClusterID, imageUUID, volIDVersion)
"", volOptions.ClusterID, imageUUID, fsutil.VolIDVersion)
if err != nil {
return nil, err
}
@@ -277,22 +289,22 @@ func reserveVol(ctx context.Context, volOptions *volumeOptions, secret map[strin
return &vid, nil
}
// reserveSnap is a helper routine to request a UUID reservation for the CSI SnapName and,
// ReserveSnap is a helper routine to request a UUID reservation for the CSI SnapName and,
// to generate the snapshot identifier for the reserved UUID.
func reserveSnap(
func ReserveSnap(
ctx context.Context,
volOptions *volumeOptions,
volOptions *VolumeOptions,
parentSubVolName string,
snap *cephfsSnapshot,
cr *util.Credentials) (*snapshotIdentifier, error) {
snap *CephfsSnapshot,
cr *util.Credentials) (*SnapshotIdentifier, error) {
var (
vid snapshotIdentifier
vid SnapshotIdentifier
imageUUID string
err error
)
// Connect to cephfs' default radosNamespace (csi)
j, err := snapJournal.Connect(volOptions.Monitors, radosNamespace, cr)
j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
if err != nil {
return nil, err
}
@@ -308,7 +320,7 @@ func reserveSnap(
// generate the snapshot ID to return to the CO system
vid.SnapshotID, err = util.GenerateVolID(ctx, volOptions.Monitors, cr, volOptions.FscID,
"", volOptions.ClusterID, imageUUID, volIDVersion)
"", volOptions.ClusterID, imageUUID, fsutil.VolIDVersion)
if err != nil {
return nil, err
}
@@ -319,15 +331,15 @@ func reserveSnap(
return &vid, nil
}
// undoSnapReservation is a helper routine to undo a name reservation for a CSI SnapshotName.
func undoSnapReservation(
// UndoSnapReservation is a helper routine to undo a name reservation for a CSI SnapshotName.
func UndoSnapReservation(
ctx context.Context,
volOptions *volumeOptions,
vid snapshotIdentifier,
volOptions *VolumeOptions,
vid SnapshotIdentifier,
snapName string,
cr *util.Credentials) error {
// Connect to cephfs' default radosNamespace (csi)
j, err := snapJournal.Connect(volOptions.Monitors, radosNamespace, cr)
j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
if err != nil {
return err
}
@@ -340,7 +352,7 @@ func undoSnapReservation(
}
/*
checkSnapExists checks to determine if passed in RequestName in volOptions exists on the backend.
CheckSnapExists checks to determine if passed in RequestName in volOptions exists on the backend.
**NOTE:** These functions manipulate the rados omaps that hold information regarding
volume names as requested by the CSI drivers. Hence, these need to be invoked only when the
@@ -353,14 +365,14 @@ because, the order of omap creation and deletion are inverse of each other, and
request name lock, and hence any stale omaps are leftovers from incomplete transactions and are
hence safe to garbage collect.
*/
func checkSnapExists(
func CheckSnapExists(
ctx context.Context,
volOptions *volumeOptions,
volOptions *VolumeOptions,
parentSubVolName string,
snap *cephfsSnapshot,
cr *util.Credentials) (*snapshotIdentifier, *snapshotInfo, error) {
snap *CephfsSnapshot,
cr *util.Credentials) (*SnapshotIdentifier, *SnapshotInfo, error) {
// Connect to cephfs' default radosNamespace (csi)
j, err := snapJournal.Connect(volOptions.Monitors, radosNamespace, cr)
j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
if err != nil {
return nil, nil, err
}
@@ -374,11 +386,11 @@ func checkSnapExists(
if snapData == nil {
return nil, nil, nil
}
sid := &snapshotIdentifier{}
sid := &SnapshotIdentifier{}
snapUUID := snapData.ImageUUID
snapID := snapData.ImageAttributes.ImageName
sid.FsSnapshotName = snapData.ImageAttributes.ImageName
snapInfo, err := volOptions.getSnapshotInfo(ctx, volumeID(snapID), volumeID(parentSubVolName))
snapInfo, err := volOptions.GetSnapshotInfo(ctx, fsutil.VolumeID(snapID), fsutil.VolumeID(parentSubVolName))
if err != nil {
if errors.Is(err, cerrors.ErrSnapNotFound) {
err = j.UndoReservation(ctx, volOptions.MetadataPool,
@@ -392,7 +404,7 @@ func checkSnapExists(
defer func() {
if err != nil {
err = volOptions.deleteSnapshot(ctx, volumeID(snapID), volumeID(parentSubVolName))
err = volOptions.DeleteSnapshot(ctx, fsutil.VolumeID(snapID), fsutil.VolumeID(parentSubVolName))
if err != nil {
log.ErrorLog(ctx, "failed to delete snapshot %s: %v", snapID, err)
@@ -405,7 +417,7 @@ func checkSnapExists(
}
}
}()
tm, err := parseTime(ctx, snapInfo.CreatedAt)
tm, err := fsutil.ParseTime(ctx, snapInfo.CreatedAt)
if err != nil {
return nil, nil, err
}
@@ -413,7 +425,7 @@ func checkSnapExists(
sid.CreationTime = tm
// found a snapshot already available, process and return it!
sid.SnapshotID, err = util.GenerateVolID(ctx, volOptions.Monitors, cr, volOptions.FscID,
"", volOptions.ClusterID, snapUUID, volIDVersion)
"", volOptions.ClusterID, snapUUID, fsutil.VolIDVersion)
if err != nil {
return nil, nil, err
}

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
package core
import (
"context"
@@ -22,6 +22,7 @@ import (
"time"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
"github.com/ceph/ceph-csi/internal/util/log"
"github.com/ceph/go-ceph/cephfs/admin"
@@ -35,8 +36,8 @@ const (
autoProtect = "snapshot-autoprotect"
)
// cephfsSnapshot represents a CSI snapshot and its cluster information.
type cephfsSnapshot struct {
// CephfsSnapshot represents a CSI snapshot and its cluster information.
type CephfsSnapshot struct {
NamePrefix string
Monitors string
// MetadataPool & Pool fields are not used atm. But its definitely good to have it in this struct
@@ -49,7 +50,7 @@ type cephfsSnapshot struct {
ReservedID string
}
func (vo *volumeOptions) createSnapshot(ctx context.Context, snapID, volID volumeID) error {
func (vo *VolumeOptions) CreateSnapshot(ctx context.Context, snapID, volID fsutil.VolumeID) error {
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
@@ -68,7 +69,7 @@ func (vo *volumeOptions) createSnapshot(ctx context.Context, snapID, volID volum
return nil
}
func (vo *volumeOptions) deleteSnapshot(ctx context.Context, snapID, volID volumeID) error {
func (vo *VolumeOptions) DeleteSnapshot(ctx context.Context, snapID, volID fsutil.VolumeID) error {
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
@@ -87,15 +88,15 @@ func (vo *volumeOptions) deleteSnapshot(ctx context.Context, snapID, volID volum
return nil
}
type snapshotInfo struct {
type SnapshotInfo struct {
CreatedAt time.Time
CreationTime *timestamp.Timestamp
HasPendingClones string
Protected string
}
func (vo *volumeOptions) getSnapshotInfo(ctx context.Context, snapID, volID volumeID) (snapshotInfo, error) {
snap := snapshotInfo{}
func (vo *VolumeOptions) GetSnapshotInfo(ctx context.Context, snapID, volID fsutil.VolumeID) (SnapshotInfo, error) {
snap := SnapshotInfo{}
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
@@ -125,7 +126,7 @@ func (vo *volumeOptions) getSnapshotInfo(ctx context.Context, snapID, volID volu
return snap, nil
}
func (vo *volumeOptions) protectSnapshot(ctx context.Context, snapID, volID volumeID) error {
func (vo *VolumeOptions) ProtectSnapshot(ctx context.Context, snapID, volID fsutil.VolumeID) error {
// If "snapshot-autoprotect" feature is present, The ProtectSnapshot
// call should be treated as a no-op.
if checkSubvolumeHasFeature(autoProtect, vo.Features) {
@@ -158,7 +159,7 @@ func (vo *volumeOptions) protectSnapshot(ctx context.Context, snapID, volID volu
return nil
}
func (vo *volumeOptions) unprotectSnapshot(ctx context.Context, snapID, volID volumeID) error {
func (vo *VolumeOptions) UnprotectSnapshot(ctx context.Context, snapID, volID fsutil.VolumeID) error {
// If "snapshot-autoprotect" feature is present, The UnprotectSnapshot
// call should be treated as a no-op.
if checkSubvolumeHasFeature(autoProtect, vo.Features) {
@@ -193,10 +194,10 @@ func (vo *volumeOptions) unprotectSnapshot(ctx context.Context, snapID, volID vo
return nil
}
func (vo *volumeOptions) cloneSnapshot(
func (vo *VolumeOptions) cloneSnapshot(
ctx context.Context,
volID, snapID, cloneID volumeID,
cloneVolOptions *volumeOptions,
volID, snapID, cloneID fsutil.VolumeID,
cloneVolOptions *VolumeOptions,
) error {
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
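
checkSubvolumeHasFeature is called by the protect/unprotect paths above but is not shown in this diff; its call sites imply a simple membership test, roughly:

```go
// Plausible sketch, inferred from the call sites above: reports whether the
// subvolume's feature list contains the named feature (e.g. autoProtect).
func checkSubvolumeHasFeature(feature string, subVolFeatures []string) bool {
	// Feature lists are short, so a linear scan is sufficient.
	for _, f := range subVolFeatures {
		if f == feature {
			return true
		}
	}

	return false
}
```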

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
package core
import (
"context"
@@ -24,6 +24,7 @@ import (
"strings"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
@@ -54,11 +55,11 @@ type Subvolume struct {
Features []string
}
func getVolumeRootPathCephDeprecated(volID volumeID) string {
func GetVolumeRootPathCephDeprecated(volID fsutil.VolumeID) string {
return path.Join("/", "csi-volumes", string(volID))
}
func (vo *volumeOptions) getVolumeRootPathCeph(ctx context.Context, volID volumeID) (string, error) {
func (vo *VolumeOptions) GetVolumeRootPathCeph(ctx context.Context, volID fsutil.VolumeID) (string, error) {
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin err %s", err)
@@ -78,7 +79,7 @@ func (vo *volumeOptions) getVolumeRootPathCeph(ctx context.Context, volID volume
return svPath, nil
}
func (vo *volumeOptions) getSubVolumeInfo(ctx context.Context, volID volumeID) (*Subvolume, error) {
func (vo *VolumeOptions) GetSubVolumeInfo(ctx context.Context, volID fsutil.VolumeID) (*Subvolume, error) {
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s:", vo.FsName, err)
@@ -141,7 +142,7 @@ type localClusterState struct {
subVolumeGroupCreated bool
}
func createVolume(ctx context.Context, volOptions *volumeOptions, volID volumeID, bytesQuota int64) error {
func CreateVolume(ctx context.Context, volOptions *VolumeOptions, volID fsutil.VolumeID, bytesQuota int64) error {
// verify if corresponding ClusterID key is present in the map,
// and if not, initialize with default values (false).
if _, keyPresent := clusterAdditionalInfo[volOptions.ClusterID]; !keyPresent {
@@ -192,10 +193,10 @@ func createVolume(ctx context.Context, volOptions *volumeOptions, volID volumeID
return nil
}
// resizeVolume will try to use ceph fs subvolume resize command to resize the
// ResizeVolume will try to use ceph fs subvolume resize command to resize the
// subvolume. If the command is not available as a fallback it will use
// CreateVolume to resize the subvolume.
func (vo *volumeOptions) resizeVolume(ctx context.Context, volID volumeID, bytesQuota int64) error {
func (vo *VolumeOptions) ResizeVolume(ctx context.Context, volID fsutil.VolumeID, bytesQuota int64) error {
// keyPresent checks whether corresponding clusterID key is present in clusterAdditionalInfo
var keyPresent bool
// verify if corresponding ClusterID key is present in the map,
@@ -229,10 +230,10 @@ func (vo *volumeOptions) resizeVolume(ctx context.Context, volID volumeID, bytes
}
clusterAdditionalInfo[vo.ClusterID].resizeState = unsupported
return createVolume(ctx, vo, volID, bytesQuota)
return CreateVolume(ctx, vo, volID, bytesQuota)
}
func (vo *volumeOptions) purgeVolume(ctx context.Context, volID volumeID, force bool) error {
func (vo *VolumeOptions) PurgeVolume(ctx context.Context, volID fsutil.VolumeID, force bool) error {
fsa, err := vo.conn.GetFSAdmin()
if err != nil {
log.ErrorLog(ctx, "could not get FSAdmin %s:", err)
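
Stepping back to ResizeVolume above: only its fallback branch appears in the diff. A hedged sketch of the primary path it tries first, via go-ceph's FSAdmin; the admin import alias, the SubvolumeGroup field, and the exact arguments are assumptions here, not visible in these hunks:

```go
// Approximate shape of resize-then-fallback: attempt the
// "ceph fs subvolume resize" command through go-ceph, and fall back to
// CreateVolume (which re-applies the quota) on clusters without it.
func (vo *VolumeOptions) resizeSketch(ctx context.Context, volID fsutil.VolumeID, bytesQuota int64) error {
	fsa, err := vo.conn.GetFSAdmin()
	if err != nil {
		return err
	}

	_, err = fsa.ResizeSubVolume(vo.FsName, vo.SubvolumeGroup, string(volID), admin.ByteCount(bytesQuota), true)
	if err == nil {
		return nil
	}

	return CreateVolume(ctx, vo, volID, bytesQuota)
}
```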

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
package core
import (
"context"
@@ -74,7 +74,7 @@ func execCommandErr(ctx context.Context, program string, args ...string) error {
// Load available ceph mounters installed on system into availableMounters
// Called from driver.go's Run().
func loadAvailableMounters(conf *util.Config) error {
func LoadAvailableMounters(conf *util.Config) error {
// #nosec
fuseMounterProbe := exec.Command("ceph-fuse", "--version")
// #nosec
@@ -113,12 +113,12 @@ func loadAvailableMounters(conf *util.Config) error {
return nil
}
type volumeMounter interface {
mount(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *volumeOptions) error
name() string
type VolumeMounter interface {
Mount(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *VolumeOptions) error
Name() string
}
func newMounter(volOptions *volumeOptions) (volumeMounter, error) {
func NewMounter(volOptions *VolumeOptions) (VolumeMounter, error) {
// Get the mounter from the configuration
wantMounter := volOptions.Mounter
@@ -145,17 +145,17 @@ func newMounter(volOptions *volumeOptions) (volumeMounter, error) {
switch chosenMounter {
case volumeMounterFuse:
return &fuseMounter{}, nil
return &FuseMounter{}, nil
case volumeMounterKernel:
return &kernelMounter{}, nil
return &KernelMounter{}, nil
}
return nil, fmt.Errorf("unknown mounter '%s'", chosenMounter)
}
type fuseMounter struct{}
type FuseMounter struct{}
func mountFuse(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *volumeOptions) error {
func mountFuse(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *VolumeOptions) error {
args := []string{
mountPoint,
"-m", volOptions.Monitors,
@@ -203,11 +203,11 @@ func mountFuse(ctx context.Context, mountPoint string, cr *util.Credentials, vol
return nil
}
func (m *fuseMounter) mount(
func (m *FuseMounter) Mount(
ctx context.Context,
mountPoint string,
cr *util.Credentials,
volOptions *volumeOptions) error {
volOptions *VolumeOptions) error {
if err := util.CreateMountPoint(mountPoint); err != nil {
return err
}
@@ -215,11 +215,11 @@ func (m *fuseMounter) mount(
return mountFuse(ctx, mountPoint, cr, volOptions)
}
func (m *fuseMounter) name() string { return "Ceph FUSE driver" }
func (m *FuseMounter) Name() string { return "Ceph FUSE driver" }
type kernelMounter struct{}
type KernelMounter struct{}
func mountKernel(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *volumeOptions) error {
func mountKernel(ctx context.Context, mountPoint string, cr *util.Credentials, volOptions *VolumeOptions) error {
if err := execCommandErr(ctx, "modprobe", "ceph"); err != nil {
return err
}
@@ -247,11 +247,11 @@ func mountKernel(ctx context.Context, mountPoint string, cr *util.Credentials, v
return err
}
func (m *kernelMounter) mount(
func (m *KernelMounter) Mount(
ctx context.Context,
mountPoint string,
cr *util.Credentials,
volOptions *volumeOptions) error {
volOptions *VolumeOptions) error {
if err := util.CreateMountPoint(mountPoint); err != nil {
return err
}
@@ -259,9 +259,9 @@ func (m *kernelMounter) mount(
return mountKernel(ctx, mountPoint, cr, volOptions)
}
func (m *kernelMounter) name() string { return "Ceph kernel client" }
func (m *KernelMounter) Name() string { return "Ceph kernel client" }
func bindMount(ctx context.Context, from, to string, readOnly bool, mntOptions []string) error {
func BindMount(ctx context.Context, from, to string, readOnly bool, mntOptions []string) error {
mntOptionSli := strings.Join(mntOptions, ",")
if err := execCommandErr(ctx, "mount", "-o", mntOptionSli, from, to); err != nil {
return fmt.Errorf("failed to bind-mount %s to %s: %w", from, to, err)
@@ -277,7 +277,7 @@ func bindMount(ctx context.Context, from, to string, readOnly bool, mntOptions [
return nil
}
func unmountVolume(ctx context.Context, mountPoint string) error {
func UnmountVolume(ctx context.Context, mountPoint string) error {
if _, stderr, err := util.ExecCommand(ctx, "umount", mountPoint); err != nil {
err = fmt.Errorf("%w stderr: %s", err, stderr)
if strings.Contains(err.Error(), fmt.Sprintf("umount: %s: not mounted", mountPoint)) ||
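
Taken together, the newly exported mounter API reads like this at a call site. A hypothetical node-side sketch (the helper and the staging path are illustrative; the exported names come from the hunks above):

```go
// Hypothetical call site for the exported mounter API.
func mountVolume(
	ctx context.Context,
	volOptions *VolumeOptions,
	cr *util.Credentials,
	stagingPath string) error {
	mounter, err := NewMounter(volOptions)
	if err != nil {
		return fmt.Errorf("failed to create mounter: %w", err)
	}

	log.DebugLog(ctx, "mounting volume using %s", mounter.Name())

	if err = mounter.Mount(ctx, stagingPath, cr, volOptions); err != nil {
		// UnmountVolume is the matching cleanup for a partial mount.
		_ = UnmountVolume(ctx, stagingPath)

		return err
	}

	return nil
}
```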

View File

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
package core
import (
"context"
@@ -26,13 +26,12 @@ import (
"github.com/container-storage-interface/spec/lib/go/csi"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
)
type volumeID string
type volumeOptions struct {
type VolumeOptions struct {
TopologyPools *[]util.TopologyConstrainedPool
TopologyRequirement *csi.TopologyRequirement
Topology map[string]string
@@ -60,7 +59,7 @@ type volumeOptions struct {
}
// Connect a CephFS volume to the Ceph cluster.
func (vo *volumeOptions) Connect(cr *util.Credentials) error {
func (vo *VolumeOptions) Connect(cr *util.Credentials) error {
if vo.conn != nil {
return nil
}
@@ -77,7 +76,7 @@ func (vo *volumeOptions) Connect(cr *util.Credentials) error {
// Destroy cleans up the CephFS volume object and closes the connection to the
// Ceph cluster in case one was setup.
func (vo *volumeOptions) Destroy() {
func (vo *VolumeOptions) Destroy() {
if vo.conn != nil {
vo.conn.Destroy()
}
@@ -147,7 +146,7 @@ func extractMounter(dest *string, options map[string]string) error {
return nil
}
func getClusterInformation(options map[string]string) (*util.ClusterInfo, error) {
func GetClusterInformation(options map[string]string) (*util.ClusterInfo, error) {
clusterID, ok := options["clusterID"]
if !ok {
err := fmt.Errorf("clusterID must be set")
@@ -181,17 +180,17 @@ func getClusterInformation(options map[string]string) (*util.ClusterInfo, error)
return clusterData, nil
}
// newVolumeOptions generates a new instance of volumeOptions from the provided
// NewVolumeOptions generates a new instance of volumeOptions from the provided
// CSI request parameters.
func newVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVolumeRequest,
cr *util.Credentials) (*volumeOptions, error) {
func NewVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVolumeRequest,
cr *util.Credentials) (*VolumeOptions, error) {
var (
opts volumeOptions
opts VolumeOptions
err error
)
volOptions := req.GetParameters()
clusterData, err := getClusterInformation(volOptions)
clusterData, err := GetClusterInformation(volOptions)
if err != nil {
return nil, err
}
@@ -259,16 +258,16 @@ func newVolumeOptions(ctx context.Context, requestName string, req *csi.CreateVo
return &opts, nil
}
// newVolumeOptionsFromVolID generates a new instance of volumeOptions and volumeIdentifier
// newVolumeOptionsFromVolID generates a new instance of volumeOptions and VolumeIdentifier
// from the provided CSI VolumeID.
func newVolumeOptionsFromVolID(
func NewVolumeOptionsFromVolID(
ctx context.Context,
volID string,
volOpt, secrets map[string]string) (*volumeOptions, *volumeIdentifier, error) {
volOpt, secrets map[string]string) (*VolumeOptions, *VolumeIdentifier, error) {
var (
vi util.CSIIdentifier
volOptions volumeOptions
vid volumeIdentifier
volOptions VolumeOptions
vid VolumeIdentifier
)
// Decode the VolID first, to detect older volumes or pre-provisioned volumes
@@ -320,7 +319,7 @@ func newVolumeOptionsFromVolID(
}
// Connect to cephfs' default radosNamespace (csi)
j, err := volJournal.Connect(volOptions.Monitors, radosNamespace, cr)
j, err := VolJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
if err != nil {
return nil, nil, err
}
@@ -358,27 +357,27 @@ func newVolumeOptionsFromVolID(
volOptions.ProvisionVolume = true
info, err := volOptions.getSubVolumeInfo(ctx, volumeID(vid.FsSubvolName))
info, err := volOptions.GetSubVolumeInfo(ctx, fsutil.VolumeID(vid.FsSubvolName))
if err == nil {
volOptions.RootPath = info.Path
volOptions.Features = info.Features
}
if errors.Is(err, cerrors.ErrInvalidCommand) {
volOptions.RootPath, err = volOptions.getVolumeRootPathCeph(ctx, volumeID(vid.FsSubvolName))
volOptions.RootPath, err = volOptions.GetVolumeRootPathCeph(ctx, fsutil.VolumeID(vid.FsSubvolName))
}
return &volOptions, &vid, err
}
// newVolumeOptionsFromMonitorList generates a new instance of volumeOptions and
// volumeIdentifier from the provided CSI volume context.
func newVolumeOptionsFromMonitorList(
// NewVolumeOptionsFromMonitorList generates a new instance of VolumeOptions and
// VolumeIdentifier from the provided CSI volume context.
func NewVolumeOptionsFromMonitorList(
volID string,
options, secrets map[string]string) (*volumeOptions, *volumeIdentifier, error) {
options, secrets map[string]string) (*VolumeOptions, *VolumeIdentifier, error) {
var (
opts volumeOptions
vid volumeIdentifier
opts VolumeOptions
vid VolumeIdentifier
provisionVolumeBool string
err error
)
@@ -408,7 +407,7 @@ func newVolumeOptionsFromMonitorList(
return nil, nil, err
}
opts.RootPath = getVolumeRootPathCephDeprecated(volumeID(volID))
opts.RootPath = GetVolumeRootPathCephDeprecated(fsutil.VolumeID(volID))
} else {
if err = extractOption(&opts.RootPath, "rootPath", options); err != nil {
return nil, nil, err
@@ -433,15 +432,15 @@ func newVolumeOptionsFromMonitorList(
return &opts, &vid, nil
}
// newVolumeOptionsFromStaticVolume generates a new instance of volumeOptions and
// volumeIdentifier from the provided CSI volume context, if the provided context is
// NewVolumeOptionsFromStaticVolume generates a new instance of volumeOptions and
// VolumeIdentifier from the provided CSI volume context, if the provided context is
// detected to be a statically provisioned volume.
func newVolumeOptionsFromStaticVolume(
func NewVolumeOptionsFromStaticVolume(
volID string,
options map[string]string) (*volumeOptions, *volumeIdentifier, error) {
options map[string]string) (*VolumeOptions, *VolumeIdentifier, error) {
var (
opts volumeOptions
vid volumeIdentifier
opts VolumeOptions
vid VolumeIdentifier
staticVol bool
err error
)
@@ -463,7 +462,7 @@ func newVolumeOptionsFromStaticVolume(
// store NOT of static boolean
opts.ProvisionVolume = !staticVol
clusterData, err := getClusterInformation(options)
clusterData, err := GetClusterInformation(options)
if err != nil {
return nil, nil, err
}
@@ -502,16 +501,16 @@ func newVolumeOptionsFromStaticVolume(
return &opts, &vid, nil
}
// newSnapshotOptionsFromID generates a new instance of volumeOptions and snapshotIdentifier
// NewSnapshotOptionsFromID generates a new instance of volumeOptions and SnapshotIdentifier
// from the provided CSI VolumeID.
func newSnapshotOptionsFromID(
func NewSnapshotOptionsFromID(
ctx context.Context,
snapID string,
cr *util.Credentials) (*volumeOptions, *snapshotInfo, *snapshotIdentifier, error) {
cr *util.Credentials) (*VolumeOptions, *SnapshotInfo, *SnapshotIdentifier, error) {
var (
vi util.CSIIdentifier
volOptions volumeOptions
sid snapshotIdentifier
volOptions VolumeOptions
sid SnapshotIdentifier
)
// Decode the snapID first, to detect a pre-provisioned snapshot before other errors
err := vi.DecomposeCSIID(snapID)
@ -560,7 +559,7 @@ func newSnapshotOptionsFromID(
}
// Connect to cephfs' default radosNamespace (csi)
j, err := snapJournal.Connect(volOptions.Monitors, radosNamespace, cr)
j, err := SnapJournal.Connect(volOptions.Monitors, fsutil.RadosNamespace, cr)
if err != nil {
return &volOptions, nil, &sid, err
}
@ -576,13 +575,13 @@ func newSnapshotOptionsFromID(
sid.FsSnapshotName = imageAttributes.ImageName
sid.FsSubvolName = imageAttributes.SourceName
subvolInfo, err := volOptions.getSubVolumeInfo(ctx, volumeID(sid.FsSubvolName))
subvolInfo, err := volOptions.GetSubVolumeInfo(ctx, fsutil.VolumeID(sid.FsSubvolName))
if err != nil {
return &volOptions, nil, &sid, err
}
volOptions.Features = subvolInfo.Features
info, err := volOptions.getSnapshotInfo(ctx, volumeID(sid.FsSnapshotName), volumeID(sid.FsSubvolName))
info, err := volOptions.GetSnapshotInfo(ctx, fsutil.VolumeID(sid.FsSnapshotName), fsutil.VolumeID(sid.FsSubvolName))
if err != nil {
return &volOptions, nil, &sid, err
}
@ -590,8 +589,8 @@ func newSnapshotOptionsFromID(
return &volOptions, &info, &sid, nil
}
func genSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (snap *cephfsSnapshot, err error) {
cephfsSnap := &cephfsSnapshot{}
func GenSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (snap *CephfsSnapshot, err error) {
cephfsSnap := &CephfsSnapshot{}
cephfsSnap.RequestName = req.GetName()
snapOptions := req.GetParameters()

View File

@ -17,6 +17,8 @@ limitations under the License.
package cephfs
import (
"github.com/ceph/ceph-csi/internal/cephfs/core"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/journal"
"github.com/ceph/ceph-csi/internal/util"
@ -25,14 +27,6 @@ import (
"github.com/container-storage-interface/spec/lib/go/csi"
)
const (
// volIDVersion is the version number of volume ID encoding scheme.
volIDVersion uint16 = 1
// RADOS namespace to store CSI specific objects and keys.
radosNamespace = "csi"
)
// Driver contains the default identity, node, and controller struct.
type Driver struct {
cd *csicommon.CSIDriver
@ -46,14 +40,6 @@ var (
// CSIInstanceID is the instance ID that is unique to an instance of CSI, used when sharing
// ceph clusters across CSI instances, to differentiate omap names per CSI instance.
CSIInstanceID = "default"
// volJournal is used to maintain RADOS based journals for CO generated
// VolumeName to backing CephFS subvolumes.
volJournal *journal.Config
// snapJournal is used to maintain RADOS based journals for CO generated
// SnapshotName to backing CephFS subvolumes.
snapJournal *journal.Config
)
// NewDriver returns new ceph driver.
@ -93,7 +79,7 @@ func (fs *Driver) Run(conf *util.Config) {
var topology map[string]string
// Configuration
if err = loadAvailableMounters(conf); err != nil {
if err = core.LoadAvailableMounters(conf); err != nil {
log.FatalLogMsg("cephfs: failed to load ceph mounters: %v", err)
}
@ -102,9 +88,9 @@ func (fs *Driver) Run(conf *util.Config) {
CSIInstanceID = conf.InstanceID
}
// Create instances of the volume and snapshot journals
volJournal = journal.NewCSIVolumeJournalWithNamespace(CSIInstanceID, radosNamespace)
core.VolJournal = journal.NewCSIVolumeJournalWithNamespace(CSIInstanceID, fsutil.RadosNamespace)
snapJournal = journal.NewCSISnapshotJournalWithNamespace(CSIInstanceID, radosNamespace)
core.SnapJournal = journal.NewCSISnapshotJournalWithNamespace(CSIInstanceID, fsutil.RadosNamespace)
// Initialize default library driver
fs.cd = csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID)
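The journal handles removed from this file now live as package-level variables in core, set by the driver at startup as shown above. A minimal sketch of the matching declarations, assuming they sit in internal/cephfs/core (their exact file placement is not visible in this commit):

package core

import "github.com/ceph/ceph-csi/internal/journal"

var (
	// VolJournal maintains RADOS based journals for CO generated
	// VolumeName to backing CephFS subvolumes; nil until the driver
	// assigns it in Run().
	VolJournal *journal.Config

	// SnapJournal maintains RADOS based journals for CO generated
	// SnapshotName to backing CephFS snapshots; nil until the driver
	// assigns it in Run().
	SnapJournal *journal.Config
)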

View File

@ -62,3 +62,9 @@ var (
// ErrVolumeHasSnapshots is returned when a subvolume has snapshots.
ErrVolumeHasSnapshots = coreError.New("volume has snapshots")
)
// IsCloneRetryError returns true if the clone error is a pending or
// in-progress error.
func IsCloneRetryError(err error) bool {
return coreError.Is(err, ErrCloneInProgress) || coreError.Is(err, ErrClonePending)
}
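A hedged usage sketch for the new helper: callers that kick off clone operations can translate retryable states into codes.Aborted so the CO retries the request, and treat everything else as fatal. The cloneVolume helper below is illustrative, not part of this commit:

// hypothetical caller; cloneVolume stands in for any clone-producing
// helper. Retryable clone states (pending/in-progress) surface as
// codes.Aborted so the CO retries the request later.
if err := cloneVolume(ctx, volOptions); err != nil {
	if cerrors.IsCloneRetryError(err) {
		return nil, status.Error(codes.Aborted, err.Error())
	}
	return nil, status.Error(codes.Internal, err.Error())
}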

View File

@ -23,7 +23,9 @@ import (
"os"
"strings"
"github.com/ceph/ceph-csi/internal/cephfs/core"
cerrors "github.com/ceph/ceph-csi/internal/cephfs/errors"
fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
"github.com/ceph/ceph-csi/internal/util"
"github.com/ceph/ceph-csi/internal/util/log"
@ -42,7 +44,9 @@ type NodeServer struct {
VolumeLocks *util.VolumeLocks
}
func getCredentialsForVolume(volOptions *volumeOptions, req *csi.NodeStageVolumeRequest) (*util.Credentials, error) {
func getCredentialsForVolume(
volOptions *core.VolumeOptions,
req *csi.NodeStageVolumeRequest) (*util.Credentials, error) {
var (
err error
cr *util.Credentials
@ -72,7 +76,7 @@ func getCredentialsForVolume(volOptions *volumeOptions, req *csi.NodeStageVolume
func (ns *NodeServer) NodeStageVolume(
ctx context.Context,
req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
var volOptions *volumeOptions
var volOptions *core.VolumeOptions
if err := util.ValidateNodeStageVolumeRequest(req); err != nil {
return nil, err
}
@ -80,7 +84,7 @@ func (ns *NodeServer) NodeStageVolume(
// Configuration
stagingTargetPath := req.GetStagingTargetPath()
volID := volumeID(req.GetVolumeId())
volID := fsutil.VolumeID(req.GetVolumeId())
if acquired := ns.VolumeLocks.TryAcquire(req.GetVolumeId()); !acquired {
log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
@ -89,21 +93,21 @@ func (ns *NodeServer) NodeStageVolume(
}
defer ns.VolumeLocks.Release(req.GetVolumeId())
volOptions, _, err := newVolumeOptionsFromVolID(ctx, string(volID), req.GetVolumeContext(), req.GetSecrets())
volOptions, _, err := core.NewVolumeOptionsFromVolID(ctx, string(volID), req.GetVolumeContext(), req.GetSecrets())
if err != nil {
if !errors.Is(err, cerrors.ErrInvalidVolID) {
return nil, status.Error(codes.Internal, err.Error())
}
// get mon IPs from the supplied cluster info
volOptions, _, err = newVolumeOptionsFromStaticVolume(string(volID), req.GetVolumeContext())
volOptions, _, err = core.NewVolumeOptionsFromStaticVolume(string(volID), req.GetVolumeContext())
if err != nil {
if !errors.Is(err, cerrors.ErrNonStaticVolume) {
return nil, status.Error(codes.Internal, err.Error())
}
// get mon IPs from the volume context
volOptions, _, err = newVolumeOptionsFromMonitorList(string(volID), req.GetVolumeContext(),
volOptions, _, err = core.NewVolumeOptionsFromMonitorList(string(volID), req.GetVolumeContext(),
req.GetSecrets())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
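The hunk above wires NodeStageVolume to a three-step resolution chain: journal-backed volume ID first, then a statically provisioned volume, then the raw monitor list. A condensed sketch of that chain under the new core names (volCtx and secrets are shorthand for req.GetVolumeContext() and req.GetSecrets(); the non-matching-error returns are trimmed for brevity):

// condensed sketch of the fallback chain, assuming ctx, volID, volCtx
// and secrets are already in scope.
volOptions, _, err := core.NewVolumeOptionsFromVolID(ctx, string(volID), volCtx, secrets)
if errors.Is(err, cerrors.ErrInvalidVolID) {
	// not a CSI-provisioned ID: try a statically provisioned volume
	volOptions, _, err = core.NewVolumeOptionsFromStaticVolume(string(volID), volCtx)
	if errors.Is(err, cerrors.ErrNonStaticVolume) {
		// last resort: monitors carried in the volume context
		volOptions, _, err = core.NewVolumeOptionsFromMonitorList(string(volID), volCtx, secrets)
	}
}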
@ -137,9 +141,9 @@ func (ns *NodeServer) NodeStageVolume(
return &csi.NodeStageVolumeResponse{}, nil
}
func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *csi.NodeStageVolumeRequest) error {
func (*NodeServer) mount(ctx context.Context, volOptions *core.VolumeOptions, req *csi.NodeStageVolumeRequest) error {
stagingTargetPath := req.GetStagingTargetPath()
volID := volumeID(req.GetVolumeId())
volID := fsutil.VolumeID(req.GetVolumeId())
cr, err := getCredentialsForVolume(volOptions, req)
if err != nil {
@ -149,14 +153,14 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
}
defer cr.DeleteCredentials()
m, err := newMounter(volOptions)
m, err := core.NewMounter(volOptions)
if err != nil {
log.ErrorLog(ctx, "failed to create mounter for volume %s: %v", volID, err)
return status.Error(codes.Internal, err.Error())
}
log.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, m.name())
log.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, m.Name())
readOnly := "ro"
fuseMountOptions := strings.Split(volOptions.FuseMountOptions, ",")
@ -165,12 +169,12 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
if req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
req.VolumeCapability.AccessMode.Mode == csi.VolumeCapability_AccessMode_SINGLE_NODE_READER_ONLY {
switch m.(type) {
case *fuseMounter:
case *core.FuseMounter:
if !csicommon.MountOptionContains(strings.Split(volOptions.FuseMountOptions, ","), readOnly) {
volOptions.FuseMountOptions = util.MountOptionsAdd(volOptions.FuseMountOptions, readOnly)
fuseMountOptions = append(fuseMountOptions, readOnly)
}
case *kernelMounter:
case *core.KernelMounter:
if !csicommon.MountOptionContains(strings.Split(volOptions.KernelMountOptions, ","), readOnly) {
volOptions.KernelMountOptions = util.MountOptionsAdd(volOptions.KernelMountOptions, readOnly)
kernelMountOptions = append(kernelMountOptions, readOnly)
@ -178,7 +182,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
}
}
if err = m.mount(ctx, stagingTargetPath, cr, volOptions); err != nil {
if err = m.Mount(ctx, stagingTargetPath, cr, volOptions); err != nil {
log.ErrorLog(ctx,
"failed to mount volume %s: %v Check dmesg logs if required.",
volID,
@ -197,7 +201,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
stagingTargetPath,
volID,
err)
uErr := unmountVolume(ctx, stagingTargetPath)
uErr := core.UnmountVolume(ctx, stagingTargetPath)
if uErr != nil {
log.ErrorLog(
ctx,
@ -259,7 +263,12 @@ func (ns *NodeServer) NodePublishVolume(
// It is not mounted yet; mount now
if err = bindMount(ctx, req.GetStagingTargetPath(), req.GetTargetPath(), req.GetReadonly(), mountOptions); err != nil {
if err = core.BindMount(
ctx,
req.GetStagingTargetPath(),
req.GetTargetPath(),
req.GetReadonly(),
mountOptions); err != nil {
log.ErrorLog(ctx, "failed to bind-mount volume %s: %v", volID, err)
return nil, status.Error(codes.Internal, err.Error())
@ -301,7 +310,7 @@ func (ns *NodeServer) NodeUnpublishVolume(
}
// Unmount the bind-mount
if err = unmountVolume(ctx, targetPath); err != nil {
if err = core.UnmountVolume(ctx, targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
@ -349,7 +358,7 @@ func (ns *NodeServer) NodeUnstageVolume(
return &csi.NodeUnstageVolumeResponse{}, nil
}
// Unmount the volume
if err = unmountVolume(ctx, stagingTargetPath); err != nil {
if err = core.UnmountVolume(ctx, stagingTargetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error())
}

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package cephfs
package util
import (
"context"
@ -26,7 +26,18 @@ import (
"github.com/golang/protobuf/ptypes/timestamp"
)
func parseTime(ctx context.Context, createTime time.Time) (*timestamp.Timestamp, error) {
// VolumeID is the string representation of a volume ID.
type VolumeID string
const (
// VolIDVersion is the version number of the volume ID encoding scheme.
VolIDVersion uint16 = 1
// RadosNamespace is the RADOS namespace used to store CSI-specific objects and keys.
RadosNamespace = "csi"
)
func ParseTime(ctx context.Context, createTime time.Time) (*timestamp.Timestamp, error) {
tm, err := ptypes.TimestampProto(createTime)
if err != nil {
log.ErrorLog(ctx, "failed to convert time %s %v", createTime, err)
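ParseTime is now exported from the relocated util package. A minimal usage sketch; info.CreatedAt and snap.CreationTime are illustrative caller-side names, not identifiers from this commit:

// hypothetical caller converting a creation time into the protobuf
// timestamp a CSI response carries.
createdAt, err := fsutil.ParseTime(ctx, info.CreatedAt)
if err != nil {
	return nil, err
}
snap.CreationTime = createdAt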

View File

@ -880,7 +880,7 @@ func genSnapFromSnapID(
}
rbdSnap.JournalPool = rbdSnap.Pool
rbdSnap.RadosNamespace, err = util.RadosNamespace(util.CsiConfigFile, rbdSnap.ClusterID)
rbdSnap.RadosNamespace, err = util.GetRadosNamespace(util.CsiConfigFile, rbdSnap.ClusterID)
if err != nil {
return err
}
@ -959,7 +959,7 @@ func generateVolumeFromVolumeID(
return rbdVol, err
}
rbdVol.RadosNamespace, err = util.RadosNamespace(util.CsiConfigFile, rbdVol.ClusterID)
rbdVol.RadosNamespace, err = util.GetRadosNamespace(util.CsiConfigFile, rbdVol.ClusterID)
if err != nil {
return rbdVol, err
}
@ -1179,7 +1179,7 @@ func genVolFromVolumeOptions(
return nil, err
}
rbdVol.RadosNamespace, err = util.RadosNamespace(util.CsiConfigFile, rbdVol.ClusterID)
rbdVol.RadosNamespace, err = util.GetRadosNamespace(util.CsiConfigFile, rbdVol.ClusterID)
if err != nil {
return nil, err
}

View File

@ -108,8 +108,8 @@ func Mons(pathToConfig, clusterID string) (string, error) {
return strings.Join(cluster.Monitors, ","), nil
}
// RadosNamespace returns the namespace for the given clusterID.
func RadosNamespace(pathToConfig, clusterID string) (string, error) {
// GetRadosNamespace returns the namespace for the given clusterID.
func GetRadosNamespace(pathToConfig, clusterID string) (string, error) {
cluster, err := readClusterInfo(pathToConfig, clusterID)
if err != nil {
return "", err