Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-06-13 02:33:34 +00:00)
cleanup: move log functions to new internal/util/log package
Moving the log functions into their own internal/util/log package makes it possible to split the humongous internal/util package into further, smaller pieces. This reduces the inter-dependencies between utility functions and components, preventing circular dependencies, which are not allowed in Go.

Updates: #852
Signed-off-by: Niels de Vos <ndevos@redhat.com>
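For context, the helpers converted throughout this diff are context-aware logging functions (ErrorLog, WarningLog, DebugLog) plus context-free variants (ErrorLogMsg, DefaultLog, DebugLogMsg, FatalLogMsg). Below is a minimal sketch of what such a package could look like. Only the exported names and signatures are taken from the call sites in the diff; the klog plumbing and the request-ID lookup are assumptions for illustration, not necessarily what ceph-csi actually ships.

// Package log is a sketch of the new internal/util/log package: thin,
// context-aware wrappers around klog, so that callers no longer have to
// import the whole internal/util package just to emit log messages.
package log

import (
	"context"
	"fmt"

	"k8s.io/klog/v2"
)

// contextKey is a hypothetical key type under which a per-request ID is stored.
type contextKey string

const ctxKeyID = contextKey("ID")

// withReqID prefixes the format string with the request ID from ctx, if any.
func withReqID(ctx context.Context, format string) string {
	if id, ok := ctx.Value(ctxKeyID).(string); ok {
		return fmt.Sprintf("ID: %s ", id) + format
	}

	return format
}

// ErrorLog logs an error message tagged with the request ID from ctx.
func ErrorLog(ctx context.Context, format string, args ...interface{}) {
	klog.ErrorDepth(1, fmt.Sprintf(withReqID(ctx, format), args...))
}

// WarningLog logs a warning message tagged with the request ID from ctx.
func WarningLog(ctx context.Context, format string, args ...interface{}) {
	klog.WarningDepth(1, fmt.Sprintf(withReqID(ctx, format), args...))
}

// DebugLog logs a debug message; the real package presumably gates this on a
// verbosity level instead of logging unconditionally.
func DebugLog(ctx context.Context, format string, args ...interface{}) {
	klog.InfoDepth(1, fmt.Sprintf(withReqID(ctx, format), args...))
}

// ErrorLogMsg and FatalLogMsg are the context-free variants used in start-up code.
func ErrorLogMsg(format string, args ...interface{}) {
	klog.ErrorDepth(1, fmt.Sprintf(format, args...))
}

func FatalLogMsg(format string, args ...interface{}) {
	klog.FatalDepth(1, fmt.Sprintf(format, args...))
}

Because such a log package imports nothing else from internal/util, packages like internal/cephfs and internal/journal can depend on it without pulling in, or cycling back into, the rest of the utility code.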
Committed by: mergify[bot]
Parent: 2036b587d7
Commit: 6d00b39886
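At each call site the change is then mechanical: import the new package and rename the receiver of the call, as the diff below does file by file. A small, self-contained usage sketch follows; the volume name, path, and error value are made up for illustration, only the function signatures come from the diff.

package main

import (
	"context"

	"github.com/ceph/ceph-csi/internal/util/log"
)

func main() {
	ctx := context.Background()

	// Before this commit these calls were util.DebugLog / util.ErrorLog;
	// the signatures are unchanged, only the package moved.
	log.DebugLog(ctx, "cephfs: successfully mounted volume %s to %s", "csi-vol-0001", "/var/lib/kubelet/staging")
	log.ErrorLog(ctx, "failed to expand volume %s: %v", "csi-vol-0001", context.DeadlineExceeded)
}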
@@ -21,19 +21,20 @@ import (
 	"fmt"
 
 	"github.com/ceph/ceph-csi/internal/util"
+	"github.com/ceph/ceph-csi/internal/util/log"
 )
 
 func (vo *volumeOptions) getFscID(ctx context.Context) (int64, error) {
 	fsa, err := vo.conn.GetFSAdmin()
 	if err != nil {
-		util.ErrorLog(ctx, "could not get FSAdmin, can not fetch filesystem ID for %s:", vo.FsName, err)
+		log.ErrorLog(ctx, "could not get FSAdmin, can not fetch filesystem ID for %s:", vo.FsName, err)
 
 		return 0, err
 	}
 
 	volumes, err := fsa.EnumerateVolumes()
 	if err != nil {
-		util.ErrorLog(ctx, "could not list volumes, can not fetch filesystem ID for %s:", vo.FsName, err)
+		log.ErrorLog(ctx, "could not list volumes, can not fetch filesystem ID for %s:", vo.FsName, err)
 
 		return 0, err
 	}
@@ -44,7 +45,7 @@ func (vo *volumeOptions) getFscID(ctx context.Context) (int64, error) {
 		}
 	}
 
-	util.ErrorLog(ctx, "failed to list volume %s", vo.FsName)
+	log.ErrorLog(ctx, "failed to list volume %s", vo.FsName)
 
 	return 0, ErrVolumeNotFound
 }
@@ -52,14 +53,14 @@ func (vo *volumeOptions) getFscID(ctx context.Context) (int64, error) {
 func (vo *volumeOptions) getMetadataPool(ctx context.Context) (string, error) {
 	fsa, err := vo.conn.GetFSAdmin()
 	if err != nil {
-		util.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s:", vo.FsName, err)
+		log.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s:", vo.FsName, err)
 
 		return "", err
 	}
 
 	fsPoolInfos, err := fsa.ListFileSystems()
 	if err != nil {
-		util.ErrorLog(ctx, "could not list filesystems, can not fetch metadata pool for %s:", vo.FsName, err)
+		log.ErrorLog(ctx, "could not list filesystems, can not fetch metadata pool for %s:", vo.FsName, err)
 
 		return "", err
 	}
@@ -76,14 +77,14 @@ func (vo *volumeOptions) getMetadataPool(ctx context.Context) (string, error) {
 func (vo *volumeOptions) getFsName(ctx context.Context) (string, error) {
 	fsa, err := vo.conn.GetFSAdmin()
 	if err != nil {
-		util.ErrorLog(ctx, "could not get FSAdmin, can not fetch filesystem name for ID %d:", vo.FscID, err)
+		log.ErrorLog(ctx, "could not get FSAdmin, can not fetch filesystem name for ID %d:", vo.FscID, err)
 
 		return "", err
 	}
 
 	volumes, err := fsa.EnumerateVolumes()
 	if err != nil {
-		util.ErrorLog(ctx, "could not list volumes, can not fetch filesystem name for ID %d:", vo.FscID, err)
+		log.ErrorLog(ctx, "could not list volumes, can not fetch filesystem name for ID %d:", vo.FscID, err)
 
 		return "", err
 	}
@ -20,7 +20,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
|
||||
"github.com/ceph/ceph-csi/internal/util"
|
||||
"github.com/ceph/ceph-csi/internal/util/log"
|
||||
)
|
||||
|
||||
// cephFSCloneState describes the status of the clone.
|
||||
@ -64,7 +64,7 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
|
||||
snapshotID := cloneID
|
||||
err := parentvolOpt.createSnapshot(ctx, snapshotID, volID)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to create snapshot %s %v", snapshotID, err)
|
||||
log.ErrorLog(ctx, "failed to create snapshot %s %v", snapshotID, err)
|
||||
|
||||
return err
|
||||
}
|
||||
@ -78,57 +78,57 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
|
||||
if protectErr != nil {
|
||||
err = parentvolOpt.deleteSnapshot(ctx, snapshotID, volID)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
|
||||
log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
|
||||
}
|
||||
}
|
||||
|
||||
if cloneErr != nil {
|
||||
if err = volOpt.purgeVolume(ctx, cloneID, true); err != nil {
|
||||
util.ErrorLog(ctx, "failed to delete volume %s: %v", cloneID, err)
|
||||
log.ErrorLog(ctx, "failed to delete volume %s: %v", cloneID, err)
|
||||
}
|
||||
if err = parentvolOpt.unprotectSnapshot(ctx, snapshotID, volID); err != nil {
|
||||
// In case the snap is already unprotected we get ErrSnapProtectionExist error code
|
||||
// in that case we are safe and we could discard this error and we are good to go
|
||||
// ahead with deletion
|
||||
if !errors.Is(err, ErrSnapProtectionExist) {
|
||||
util.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapshotID, err)
|
||||
log.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapshotID, err)
|
||||
}
|
||||
}
|
||||
if err = parentvolOpt.deleteSnapshot(ctx, snapshotID, volID); err != nil {
|
||||
util.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
|
||||
log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
protectErr = parentvolOpt.protectSnapshot(ctx, snapshotID, volID)
|
||||
if protectErr != nil {
|
||||
util.ErrorLog(ctx, "failed to protect snapshot %s %v", snapshotID, protectErr)
|
||||
log.ErrorLog(ctx, "failed to protect snapshot %s %v", snapshotID, protectErr)
|
||||
|
||||
return protectErr
|
||||
}
|
||||
|
||||
cloneErr = parentvolOpt.cloneSnapshot(ctx, volID, snapshotID, cloneID, volOpt)
|
||||
if cloneErr != nil {
|
||||
util.ErrorLog(ctx, "failed to clone snapshot %s %s to %s %v", volID, snapshotID, cloneID, cloneErr)
|
||||
log.ErrorLog(ctx, "failed to clone snapshot %s %s to %s %v", volID, snapshotID, cloneID, cloneErr)
|
||||
|
||||
return cloneErr
|
||||
}
|
||||
|
||||
cloneState, cloneErr := volOpt.getCloneState(ctx, cloneID)
|
||||
if cloneErr != nil {
|
||||
util.ErrorLog(ctx, "failed to get clone state: %v", cloneErr)
|
||||
log.ErrorLog(ctx, "failed to get clone state: %v", cloneErr)
|
||||
|
||||
return cloneErr
|
||||
}
|
||||
|
||||
if cloneState != cephFSCloneComplete {
|
||||
util.ErrorLog(ctx, "clone %s did not complete: %v", cloneID, cloneState.toError())
|
||||
log.ErrorLog(ctx, "clone %s did not complete: %v", cloneID, cloneState.toError())
|
||||
|
||||
return cloneState.toError()
|
||||
}
|
||||
// This is a work around to fix sizing issue for cloned images
|
||||
err = volOpt.resizeVolume(ctx, cloneID, volOpt.Size)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to expand volume %s: %v", cloneID, err)
|
||||
log.ErrorLog(ctx, "failed to expand volume %s: %v", cloneID, err)
|
||||
|
||||
return err
|
||||
}
|
||||
@ -138,13 +138,13 @@ func createCloneFromSubvolume(ctx context.Context, volID, cloneID volumeID, volO
|
||||
// in that case we are safe and we could discard this error and we are good to go
|
||||
// ahead with deletion
|
||||
if !errors.Is(err, ErrSnapProtectionExist) {
|
||||
util.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapshotID, err)
|
||||
log.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapshotID, err)
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err = parentvolOpt.deleteSnapshot(ctx, snapshotID, volID); err != nil {
|
||||
util.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
|
||||
log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapshotID, err)
|
||||
|
||||
return err
|
||||
}
|
||||
@ -171,14 +171,14 @@ func cleanupCloneFromSubvolumeSnapshot(
|
||||
if snapInfo.Protected == snapshotIsProtected {
|
||||
err = parentVolOpt.unprotectSnapshot(ctx, snapShotID, volID)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapShotID, err)
|
||||
log.ErrorLog(ctx, "failed to unprotect snapshot %s %v", snapShotID, err)
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
err = parentVolOpt.deleteSnapshot(ctx, snapShotID, volID)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to delete snapshot %s %v", snapShotID, err)
|
||||
log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapShotID, err)
|
||||
|
||||
return err
|
||||
}
|
||||
@ -206,7 +206,7 @@ func createCloneFromSnapshot(
|
||||
if err != nil {
|
||||
if !isCloneRetryError(err) {
|
||||
if dErr := volOptions.purgeVolume(ctx, volumeID(vID.FsSubvolName), true); dErr != nil {
|
||||
util.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, dErr)
|
||||
log.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, dErr)
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -214,7 +214,7 @@ func createCloneFromSnapshot(
|
||||
|
||||
cloneState, err := volOptions.getCloneState(ctx, volumeID(vID.FsSubvolName))
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to get clone state: %v", err)
|
||||
log.ErrorLog(ctx, "failed to get clone state: %v", err)
|
||||
|
||||
return err
|
||||
}
|
||||
@ -227,7 +227,7 @@ func createCloneFromSnapshot(
|
||||
// in the new cloned volume too. Till then we are explicitly making the size set
|
||||
err = volOptions.resizeVolume(ctx, volumeID(vID.FsSubvolName), volOptions.Size)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to expand volume %s with error: %v", vID.FsSubvolName, err)
|
||||
log.ErrorLog(ctx, "failed to expand volume %s with error: %v", vID.FsSubvolName, err)
|
||||
|
||||
return err
|
||||
}
|
||||
@ -238,7 +238,7 @@ func createCloneFromSnapshot(
|
||||
func (vo *volumeOptions) getCloneState(ctx context.Context, volID volumeID) (cephFSCloneState, error) {
|
||||
fsa, err := vo.conn.GetFSAdmin()
|
||||
if err != nil {
|
||||
util.ErrorLog(
|
||||
log.ErrorLog(
|
||||
ctx,
|
||||
"could not get FSAdmin, can get clone status for volume %s with ID %s: %v",
|
||||
vo.FsName,
|
||||
@ -250,7 +250,7 @@ func (vo *volumeOptions) getCloneState(ctx context.Context, volID volumeID) (cep
|
||||
|
||||
cs, err := fsa.CloneStatus(vo.FsName, vo.SubvolumeGroup, string(volID))
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "could not get clone state for volume %s with ID %s: %v", vo.FsName, string(volID), err)
|
||||
log.ErrorLog(ctx, "could not get clone state for volume %s with ID %s: %v", vo.FsName, string(volID), err)
|
||||
|
||||
return cephFSCloneError, err
|
||||
}
|
||||
|
@ -23,6 +23,7 @@ import (
|
||||
|
||||
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
|
||||
"github.com/ceph/ceph-csi/internal/util"
|
||||
"github.com/ceph/ceph-csi/internal/util/log"
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
@ -59,7 +60,7 @@ func (cs *ControllerServer) createBackingVolume(
|
||||
var err error
|
||||
if sID != nil {
|
||||
if err = cs.OperationLocks.GetRestoreLock(sID.SnapshotID); err != nil {
|
||||
util.ErrorLog(ctx, err.Error())
|
||||
log.ErrorLog(ctx, err.Error())
|
||||
|
||||
return status.Error(codes.Aborted, err.Error())
|
||||
}
|
||||
@ -67,7 +68,7 @@ func (cs *ControllerServer) createBackingVolume(
|
||||
|
||||
err = createCloneFromSnapshot(ctx, parentVolOpt, volOptions, vID, sID)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to create clone from snapshot %s: %v", sID.FsSnapshotName, err)
|
||||
log.ErrorLog(ctx, "failed to create clone from snapshot %s: %v", sID.FsSnapshotName, err)
|
||||
|
||||
return err
|
||||
}
|
||||
@ -76,7 +77,7 @@ func (cs *ControllerServer) createBackingVolume(
|
||||
}
|
||||
if parentVolOpt != nil {
|
||||
if err = cs.OperationLocks.GetCloneLock(pvID.VolumeID); err != nil {
|
||||
util.ErrorLog(ctx, err.Error())
|
||||
log.ErrorLog(ctx, err.Error())
|
||||
|
||||
return status.Error(codes.Aborted, err.Error())
|
||||
}
|
||||
@ -88,7 +89,7 @@ func (cs *ControllerServer) createBackingVolume(
|
||||
volOptions,
|
||||
parentVolOpt)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to create clone from subvolume %s: %v", volumeID(pvID.FsSubvolName), err)
|
||||
log.ErrorLog(ctx, "failed to create clone from subvolume %s: %v", volumeID(pvID.FsSubvolName), err)
|
||||
|
||||
return err
|
||||
}
|
||||
@ -97,7 +98,7 @@ func (cs *ControllerServer) createBackingVolume(
|
||||
}
|
||||
|
||||
if err = createVolume(ctx, volOptions, volumeID(vID.FsSubvolName), volOptions.Size); err != nil {
|
||||
util.ErrorLog(ctx, "failed to create volume %s: %v", volOptions.RequestName, err)
|
||||
log.ErrorLog(ctx, "failed to create volume %s: %v", volOptions.RequestName, err)
|
||||
|
||||
return status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
@ -150,7 +151,7 @@ func (cs *ControllerServer) CreateVolume(
|
||||
ctx context.Context,
|
||||
req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
|
||||
if err := cs.validateCreateVolumeRequest(req); err != nil {
|
||||
util.ErrorLog(ctx, "CreateVolumeRequest validation failed: %v", err)
|
||||
log.ErrorLog(ctx, "CreateVolumeRequest validation failed: %v", err)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
@ -161,7 +162,7 @@ func (cs *ControllerServer) CreateVolume(
|
||||
|
||||
cr, err := util.NewAdminCredentials(secret)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to retrieve admin credentials: %v", err)
|
||||
log.ErrorLog(ctx, "failed to retrieve admin credentials: %v", err)
|
||||
|
||||
return nil, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
@ -169,7 +170,7 @@ func (cs *ControllerServer) CreateVolume(
|
||||
|
||||
// Existence and conflict checks
|
||||
if acquired := cs.VolumeLocks.TryAcquire(requestName); !acquired {
|
||||
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, requestName)
|
||||
log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, requestName)
|
||||
|
||||
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, requestName)
|
||||
}
|
||||
@ -177,7 +178,7 @@ func (cs *ControllerServer) CreateVolume(
|
||||
|
||||
volOptions, err := newVolumeOptions(ctx, requestName, req, cr)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)
|
||||
log.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)
|
||||
|
||||
return nil, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
@ -216,7 +217,7 @@ func (cs *ControllerServer) CreateVolume(
|
||||
if err != nil {
|
||||
purgeErr := volOptions.purgeVolume(ctx, volumeID(vID.FsSubvolName), false)
|
||||
if purgeErr != nil {
|
||||
util.ErrorLog(ctx, "failed to delete volume %s: %v", requestName, purgeErr)
|
||||
log.ErrorLog(ctx, "failed to delete volume %s: %v", requestName, purgeErr)
|
||||
// All errors other than ErrVolumeNotFound should return an error back to the caller
|
||||
if !errors.Is(purgeErr, ErrVolumeNotFound) {
|
||||
return nil, status.Error(codes.Internal, purgeErr.Error())
|
||||
@ -224,10 +225,10 @@ func (cs *ControllerServer) CreateVolume(
|
||||
}
|
||||
errUndo := undoVolReservation(ctx, volOptions, *vID, secret)
|
||||
if errUndo != nil {
|
||||
util.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)",
|
||||
log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)",
|
||||
requestName, errUndo)
|
||||
}
|
||||
util.ErrorLog(ctx, "failed to expand volume %s: %v", volumeID(vID.FsSubvolName), err)
|
||||
log.ErrorLog(ctx, "failed to expand volume %s: %v", volumeID(vID.FsSubvolName), err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
@ -264,7 +265,7 @@ func (cs *ControllerServer) CreateVolume(
|
||||
if !isCloneRetryError(err) {
|
||||
errDefer := undoVolReservation(ctx, volOptions, *vID, secret)
|
||||
if errDefer != nil {
|
||||
util.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)",
|
||||
log.WarningLog(ctx, "failed undoing reservation of volume: %s (%s)",
|
||||
requestName, errDefer)
|
||||
}
|
||||
}
|
||||
@ -285,7 +286,7 @@ func (cs *ControllerServer) CreateVolume(
|
||||
if err != nil {
|
||||
purgeErr := volOptions.purgeVolume(ctx, volumeID(vID.FsSubvolName), true)
|
||||
if purgeErr != nil {
|
||||
util.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, purgeErr)
|
||||
log.ErrorLog(ctx, "failed to delete volume %s: %v", vID.FsSubvolName, purgeErr)
|
||||
// All errors other than ErrVolumeNotFound should return an error back to the caller
|
||||
if !errors.Is(purgeErr, ErrVolumeNotFound) {
|
||||
// If the subvolume deletion is failed, we should not cleanup
|
||||
@ -297,12 +298,12 @@ func (cs *ControllerServer) CreateVolume(
|
||||
return nil, status.Error(codes.Internal, purgeErr.Error())
|
||||
}
|
||||
}
|
||||
util.ErrorLog(ctx, "failed to get subvolume path %s: %v", vID.FsSubvolName, err)
|
||||
log.ErrorLog(ctx, "failed to get subvolume path %s: %v", vID.FsSubvolName, err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
util.DebugLog(ctx, "cephfs: successfully created backing volume named %s for request name %s",
|
||||
log.DebugLog(ctx, "cephfs: successfully created backing volume named %s for request name %s",
|
||||
vID.FsSubvolName, requestName)
|
||||
volumeContext := req.GetParameters()
|
||||
volumeContext["subvolumeName"] = vID.FsSubvolName
|
||||
@ -330,7 +331,7 @@ func (cs *ControllerServer) DeleteVolume(
|
||||
ctx context.Context,
|
||||
req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
|
||||
if err := cs.validateDeleteVolumeRequest(); err != nil {
|
||||
util.ErrorLog(ctx, "DeleteVolumeRequest validation failed: %v", err)
|
||||
log.ErrorLog(ctx, "DeleteVolumeRequest validation failed: %v", err)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
@ -340,7 +341,7 @@ func (cs *ControllerServer) DeleteVolume(
|
||||
|
||||
// lock out parallel delete operations
|
||||
if acquired := cs.VolumeLocks.TryAcquire(string(volID)); !acquired {
|
||||
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
|
||||
log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
|
||||
|
||||
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, string(volID))
|
||||
}
|
||||
@ -348,7 +349,7 @@ func (cs *ControllerServer) DeleteVolume(
|
||||
|
||||
// lock out volumeID for clone and expand operation
|
||||
if err := cs.OperationLocks.GetDeleteLock(req.GetVolumeId()); err != nil {
|
||||
util.ErrorLog(ctx, err.Error())
|
||||
log.ErrorLog(ctx, err.Error())
|
||||
|
||||
return nil, status.Error(codes.Aborted, err.Error())
|
||||
}
|
||||
@ -360,7 +361,7 @@ func (cs *ControllerServer) DeleteVolume(
|
||||
// if error is ErrPoolNotFound, the pool is already deleted we dont
|
||||
// need to worry about deleting subvolume or omap data, return success
|
||||
if errors.Is(err, util.ErrPoolNotFound) {
|
||||
util.WarningLog(ctx, "failed to get backend volume for %s: %v", string(volID), err)
|
||||
log.WarningLog(ctx, "failed to get backend volume for %s: %v", string(volID), err)
|
||||
|
||||
return &csi.DeleteVolumeResponse{}, nil
|
||||
}
|
||||
@ -371,7 +372,7 @@ func (cs *ControllerServer) DeleteVolume(
|
||||
return &csi.DeleteVolumeResponse{}, nil
|
||||
}
|
||||
|
||||
util.ErrorLog(ctx, "Error returned from newVolumeOptionsFromVolID: %v", err)
|
||||
log.ErrorLog(ctx, "Error returned from newVolumeOptionsFromVolID: %v", err)
|
||||
|
||||
// All errors other than ErrVolumeNotFound should return an error back to the caller
|
||||
if !errors.Is(err, ErrVolumeNotFound) {
|
||||
@ -404,14 +405,14 @@ func (cs *ControllerServer) DeleteVolume(
|
||||
// Deleting a volume requires admin credentials
|
||||
cr, err := util.NewAdminCredentials(secrets)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to retrieve admin credentials: %v", err)
|
||||
log.ErrorLog(ctx, "failed to retrieve admin credentials: %v", err)
|
||||
|
||||
return nil, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
defer cr.DeleteCredentials()
|
||||
|
||||
if err = volOptions.purgeVolume(ctx, volumeID(vID.FsSubvolName), false); err != nil {
|
||||
util.ErrorLog(ctx, "failed to delete volume %s: %v", volID, err)
|
||||
log.ErrorLog(ctx, "failed to delete volume %s: %v", volID, err)
|
||||
if errors.Is(err, ErrVolumeHasSnapshots) {
|
||||
return nil, status.Error(codes.FailedPrecondition, err.Error())
|
||||
}
|
||||
@ -425,7 +426,7 @@ func (cs *ControllerServer) DeleteVolume(
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
util.DebugLog(ctx, "cephfs: successfully deleted volume %s", volID)
|
||||
log.DebugLog(ctx, "cephfs: successfully deleted volume %s", volID)
|
||||
|
||||
return &csi.DeleteVolumeResponse{}, nil
|
||||
}
|
||||
@ -454,7 +455,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
|
||||
ctx context.Context,
|
||||
req *csi.ControllerExpandVolumeRequest) (*csi.ControllerExpandVolumeResponse, error) {
|
||||
if err := cs.validateExpandVolumeRequest(req); err != nil {
|
||||
util.ErrorLog(ctx, "ControllerExpandVolumeRequest validation failed: %v", err)
|
||||
log.ErrorLog(ctx, "ControllerExpandVolumeRequest validation failed: %v", err)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
@ -464,7 +465,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
|
||||
|
||||
// lock out parallel delete operations
|
||||
if acquired := cs.VolumeLocks.TryAcquire(volID); !acquired {
|
||||
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
|
||||
log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
|
||||
|
||||
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
|
||||
}
|
||||
@ -472,7 +473,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
|
||||
|
||||
// lock out volumeID for clone and delete operation
|
||||
if err := cs.OperationLocks.GetExpandLock(volID); err != nil {
|
||||
util.ErrorLog(ctx, err.Error())
|
||||
log.ErrorLog(ctx, err.Error())
|
||||
|
||||
return nil, status.Error(codes.Aborted, err.Error())
|
||||
}
|
||||
@ -486,7 +487,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
|
||||
|
||||
volOptions, volIdentifier, err := newVolumeOptionsFromVolID(ctx, volID, nil, secret)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)
|
||||
log.ErrorLog(ctx, "validation and extraction of volume options failed: %v", err)
|
||||
|
||||
return nil, status.Error(codes.InvalidArgument, err.Error())
|
||||
}
|
||||
@ -495,7 +496,7 @@ func (cs *ControllerServer) ControllerExpandVolume(
|
||||
RoundOffSize := util.RoundOffBytes(req.GetCapacityRange().GetRequiredBytes())
|
||||
|
||||
if err = volOptions.resizeVolume(ctx, volumeID(volIdentifier.FsSubvolName), RoundOffSize); err != nil {
|
||||
util.ErrorLog(ctx, "failed to expand volume %s: %v", volumeID(volIdentifier.FsSubvolName), err)
|
||||
log.ErrorLog(ctx, "failed to expand volume %s: %v", volumeID(volIdentifier.FsSubvolName), err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
@ -530,14 +531,14 @@ func (cs *ControllerServer) CreateSnapshot(
|
||||
sourceVolID := req.GetSourceVolumeId()
|
||||
// Existence and conflict checks
|
||||
if acquired := cs.SnapshotLocks.TryAcquire(requestName); !acquired {
|
||||
util.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, requestName)
|
||||
log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, requestName)
|
||||
|
||||
return nil, status.Errorf(codes.Aborted, util.SnapshotOperationAlreadyExistsFmt, requestName)
|
||||
}
|
||||
defer cs.SnapshotLocks.Release(requestName)
|
||||
|
||||
if err = cs.OperationLocks.GetSnapshotCreateLock(sourceVolID); err != nil {
|
||||
util.ErrorLog(ctx, err.Error())
|
||||
log.ErrorLog(ctx, err.Error())
|
||||
|
||||
return nil, status.Error(codes.Aborted, err.Error())
|
||||
}
|
||||
@ -548,7 +549,7 @@ func (cs *ControllerServer) CreateSnapshot(
|
||||
parentVolOptions, vid, err := newVolumeOptionsFromVolID(ctx, sourceVolID, nil, req.GetSecrets())
|
||||
if err != nil {
|
||||
if errors.Is(err, util.ErrPoolNotFound) {
|
||||
util.WarningLog(ctx, "failed to get backend volume for %s: %v", sourceVolID, err)
|
||||
log.WarningLog(ctx, "failed to get backend volume for %s: %v", sourceVolID, err)
|
||||
|
||||
return nil, status.Error(codes.NotFound, err.Error())
|
||||
}
|
||||
@ -576,7 +577,7 @@ func (cs *ControllerServer) CreateSnapshot(
|
||||
|
||||
// lock out parallel snapshot create operations
|
||||
if acquired := cs.VolumeLocks.TryAcquire(sourceVolID); !acquired {
|
||||
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, sourceVolID)
|
||||
log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, sourceVolID)
|
||||
|
||||
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, sourceVolID)
|
||||
}
|
||||
@ -605,7 +606,7 @@ func (cs *ControllerServer) CreateSnapshot(
|
||||
if sid != nil {
|
||||
errDefer := undoSnapReservation(ctx, parentVolOptions, *sid, snapName, cr)
|
||||
if errDefer != nil {
|
||||
util.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)",
|
||||
log.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)",
|
||||
requestName, errDefer)
|
||||
}
|
||||
}
|
||||
@ -620,7 +621,7 @@ func (cs *ControllerServer) CreateSnapshot(
|
||||
err = parentVolOptions.protectSnapshot(ctx, volumeID(sid.FsSnapshotName), volumeID(vid.FsSubvolName))
|
||||
if err != nil {
|
||||
protected = false
|
||||
util.WarningLog(ctx, "failed to protect snapshot of snapshot: %s (%s)",
|
||||
log.WarningLog(ctx, "failed to protect snapshot of snapshot: %s (%s)",
|
||||
sid.FsSnapshotName, err)
|
||||
}
|
||||
}
|
||||
@ -645,7 +646,7 @@ func (cs *ControllerServer) CreateSnapshot(
|
||||
if err != nil {
|
||||
errDefer := undoSnapReservation(ctx, parentVolOptions, *sID, snapName, cr)
|
||||
if errDefer != nil {
|
||||
util.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)",
|
||||
log.WarningLog(ctx, "failed undoing reservation of snapshot: %s (%s)",
|
||||
requestName, errDefer)
|
||||
}
|
||||
}
|
||||
@ -672,7 +673,7 @@ func doSnapshot(ctx context.Context, volOpt *volumeOptions, subvolumeName, snaps
|
||||
snap := snapshotInfo{}
|
||||
err := volOpt.createSnapshot(ctx, snapID, volID)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to create snapshot %s %v", snapID, err)
|
||||
log.ErrorLog(ctx, "failed to create snapshot %s %v", snapID, err)
|
||||
|
||||
return snap, err
|
||||
}
|
||||
@ -680,13 +681,13 @@ func doSnapshot(ctx context.Context, volOpt *volumeOptions, subvolumeName, snaps
|
||||
if err != nil {
|
||||
dErr := volOpt.deleteSnapshot(ctx, snapID, volID)
|
||||
if dErr != nil {
|
||||
util.ErrorLog(ctx, "failed to delete snapshot %s %v", snapID, err)
|
||||
log.ErrorLog(ctx, "failed to delete snapshot %s %v", snapID, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
snap, err = volOpt.getSnapshotInfo(ctx, snapID, volID)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to get snapshot info %s %v", snapID, err)
|
||||
log.ErrorLog(ctx, "failed to get snapshot info %s %v", snapID, err)
|
||||
|
||||
return snap, fmt.Errorf("failed to get snapshot info for snapshot:%s", snapID)
|
||||
}
|
||||
@ -698,7 +699,7 @@ func doSnapshot(ctx context.Context, volOpt *volumeOptions, subvolumeName, snaps
|
||||
snap.CreationTime = t
|
||||
err = volOpt.protectSnapshot(ctx, snapID, volID)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to protect snapshot %s %v", snapID, err)
|
||||
log.ErrorLog(ctx, "failed to protect snapshot %s %v", snapID, err)
|
||||
}
|
||||
|
||||
return snap, err
|
||||
@ -707,7 +708,7 @@ func doSnapshot(ctx context.Context, volOpt *volumeOptions, subvolumeName, snaps
|
||||
func (cs *ControllerServer) validateSnapshotReq(ctx context.Context, req *csi.CreateSnapshotRequest) error {
|
||||
if err := cs.Driver.ValidateControllerServiceRequest(
|
||||
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
|
||||
util.ErrorLog(ctx, "invalid create snapshot req: %v", protosanitizer.StripSecrets(req))
|
||||
log.ErrorLog(ctx, "invalid create snapshot req: %v", protosanitizer.StripSecrets(req))
|
||||
|
||||
return err
|
||||
}
|
||||
@ -730,7 +731,7 @@ func (cs *ControllerServer) DeleteSnapshot(
|
||||
req *csi.DeleteSnapshotRequest) (*csi.DeleteSnapshotResponse, error) {
|
||||
if err := cs.Driver.ValidateControllerServiceRequest(
|
||||
csi.ControllerServiceCapability_RPC_CREATE_DELETE_SNAPSHOT); err != nil {
|
||||
util.ErrorLog(ctx, "invalid delete snapshot req: %v", protosanitizer.StripSecrets(req))
|
||||
log.ErrorLog(ctx, "invalid delete snapshot req: %v", protosanitizer.StripSecrets(req))
|
||||
|
||||
return nil, err
|
||||
}
|
||||
@ -746,7 +747,7 @@ func (cs *ControllerServer) DeleteSnapshot(
|
||||
}
|
||||
|
||||
if acquired := cs.SnapshotLocks.TryAcquire(snapshotID); !acquired {
|
||||
util.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID)
|
||||
log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, snapshotID)
|
||||
|
||||
return nil, status.Errorf(codes.Aborted, util.SnapshotOperationAlreadyExistsFmt, snapshotID)
|
||||
}
|
||||
@ -754,7 +755,7 @@ func (cs *ControllerServer) DeleteSnapshot(
|
||||
|
||||
// lock out snapshotID for restore operation
|
||||
if err = cs.OperationLocks.GetDeleteLock(snapshotID); err != nil {
|
||||
util.ErrorLog(ctx, err.Error())
|
||||
log.ErrorLog(ctx, err.Error())
|
||||
|
||||
return nil, status.Error(codes.Aborted, err.Error())
|
||||
}
|
||||
@ -766,7 +767,7 @@ func (cs *ControllerServer) DeleteSnapshot(
|
||||
case errors.Is(err, util.ErrPoolNotFound):
|
||||
// if error is ErrPoolNotFound, the pool is already deleted we dont
|
||||
// need to worry about deleting snapshot or omap data, return success
|
||||
util.WarningLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)
|
||||
log.WarningLog(ctx, "failed to get backend snapshot for %s: %v", snapshotID, err)
|
||||
|
||||
return &csi.DeleteSnapshotResponse{}, nil
|
||||
case errors.Is(err, util.ErrKeyNotFound):
|
||||
@ -777,7 +778,7 @@ func (cs *ControllerServer) DeleteSnapshot(
|
||||
case errors.Is(err, ErrSnapNotFound):
|
||||
err = undoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
|
||||
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
|
||||
sid.FsSubvolName, sid.FsSnapshotName, err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
@ -787,10 +788,10 @@ func (cs *ControllerServer) DeleteSnapshot(
|
||||
case errors.Is(err, ErrVolumeNotFound):
|
||||
// if the error is ErrVolumeNotFound, the subvolume is already deleted
|
||||
// from backend, Hence undo the omap entries and return success
|
||||
util.ErrorLog(ctx, "Volume not present")
|
||||
log.ErrorLog(ctx, "Volume not present")
|
||||
err = undoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
|
||||
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
|
||||
sid.FsSubvolName, sid.FsSnapshotName, err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
@ -806,7 +807,7 @@ func (cs *ControllerServer) DeleteSnapshot(
|
||||
// safeguard against parallel create or delete requests against the same
|
||||
// name
|
||||
if acquired := cs.SnapshotLocks.TryAcquire(sid.RequestName); !acquired {
|
||||
util.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, sid.RequestName)
|
||||
log.ErrorLog(ctx, util.SnapshotOperationAlreadyExistsFmt, sid.RequestName)
|
||||
|
||||
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, sid.RequestName)
|
||||
}
|
||||
@ -827,7 +828,7 @@ func (cs *ControllerServer) DeleteSnapshot(
|
||||
}
|
||||
err = undoSnapReservation(ctx, volOpt, *sid, sid.FsSnapshotName, cr)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
|
||||
log.ErrorLog(ctx, "failed to remove reservation for snapname (%s) with backing snap (%s) (%s)",
|
||||
sid.RequestName, sid.FsSnapshotName, err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
|
@ -20,6 +20,7 @@ import (
|
||||
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
|
||||
"github.com/ceph/ceph-csi/internal/journal"
|
||||
"github.com/ceph/ceph-csi/internal/util"
|
||||
"github.com/ceph/ceph-csi/internal/util/log"
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
)
|
||||
@ -93,11 +94,11 @@ func (fs *Driver) Run(conf *util.Config) {
|
||||
|
||||
// Configuration
|
||||
if err = loadAvailableMounters(conf); err != nil {
|
||||
util.FatalLogMsg("cephfs: failed to load ceph mounters: %v", err)
|
||||
log.FatalLogMsg("cephfs: failed to load ceph mounters: %v", err)
|
||||
}
|
||||
|
||||
if err = util.WriteCephConfig(); err != nil {
|
||||
util.FatalLogMsg("failed to write ceph configuration file: %v", err)
|
||||
log.FatalLogMsg("failed to write ceph configuration file: %v", err)
|
||||
}
|
||||
|
||||
// Use passed in instance ID, if provided for omap suffix naming
|
||||
@ -112,7 +113,7 @@ func (fs *Driver) Run(conf *util.Config) {
|
||||
|
||||
fs.cd = csicommon.NewCSIDriver(conf.DriverName, util.DriverVersion, conf.NodeID)
|
||||
if fs.cd == nil {
|
||||
util.FatalLogMsg("failed to initialize CSI driver")
|
||||
log.FatalLogMsg("failed to initialize CSI driver")
|
||||
}
|
||||
|
||||
if conf.IsControllerServer || !conf.IsNodeServer {
|
||||
@ -134,7 +135,7 @@ func (fs *Driver) Run(conf *util.Config) {
|
||||
if conf.IsNodeServer {
|
||||
topology, err = util.GetTopologyFromDomainLabels(conf.DomainLabels, conf.NodeID, conf.DriverName)
|
||||
if err != nil {
|
||||
util.FatalLogMsg(err.Error())
|
||||
log.FatalLogMsg(err.Error())
|
||||
}
|
||||
fs.ns = NewNodeServer(fs.cd, conf.Vtype, topology)
|
||||
}
|
||||
@ -145,7 +146,7 @@ func (fs *Driver) Run(conf *util.Config) {
|
||||
if !conf.IsControllerServer && !conf.IsNodeServer {
|
||||
topology, err = util.GetTopologyFromDomainLabels(conf.DomainLabels, conf.NodeID, conf.DriverName)
|
||||
if err != nil {
|
||||
util.FatalLogMsg(err.Error())
|
||||
log.FatalLogMsg(err.Error())
|
||||
}
|
||||
fs.ns = NewNodeServer(fs.cd, conf.Vtype, topology)
|
||||
fs.cs = NewControllerServer(fs.cd)
|
||||
@ -161,14 +162,14 @@ func (fs *Driver) Run(conf *util.Config) {
|
||||
}
|
||||
server.Start(conf.Endpoint, conf.HistogramOption, srv, conf.EnableGRPCMetrics)
|
||||
if conf.EnableGRPCMetrics {
|
||||
util.WarningLogMsg("EnableGRPCMetrics is deprecated")
|
||||
log.WarningLogMsg("EnableGRPCMetrics is deprecated")
|
||||
go util.StartMetricsServer(conf)
|
||||
}
|
||||
if conf.EnableProfiling {
|
||||
if !conf.EnableGRPCMetrics {
|
||||
go util.StartMetricsServer(conf)
|
||||
}
|
||||
util.DebugLogMsg("Registering profiling handler")
|
||||
log.DebugLogMsg("Registering profiling handler")
|
||||
go util.EnableProfiling()
|
||||
}
|
||||
server.Wait()
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/ceph/ceph-csi/internal/util"
|
||||
"github.com/ceph/ceph-csi/internal/util/log"
|
||||
|
||||
"github.com/golang/protobuf/ptypes/timestamp"
|
||||
)
|
||||
@ -112,7 +113,7 @@ func checkVolExists(ctx context.Context,
|
||||
if cloneState == cephFSCloneFailed {
|
||||
err = volOptions.purgeVolume(ctx, volumeID(vid.FsSubvolName), true)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to delete volume %s: %v", vid.FsSubvolName, err)
|
||||
log.ErrorLog(ctx, "failed to delete volume %s: %v", vid.FsSubvolName, err)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
@ -171,7 +172,7 @@ func checkVolExists(ctx context.Context,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
util.DebugLog(ctx, "Found existing volume (%s) with subvolume name (%s) for request (%s)",
|
||||
log.DebugLog(ctx, "Found existing volume (%s) with subvolume name (%s) for request (%s)",
|
||||
vid.VolumeID, vid.FsSubvolName, volOptions.RequestName)
|
||||
|
||||
if parentVolOpt != nil && pvID != nil {
|
||||
@ -269,7 +270,7 @@ func reserveVol(ctx context.Context, volOptions *volumeOptions, secret map[strin
|
||||
return nil, err
|
||||
}
|
||||
|
||||
util.DebugLog(ctx, "Generated Volume ID (%s) and subvolume name (%s) for request name (%s)",
|
||||
log.DebugLog(ctx, "Generated Volume ID (%s) and subvolume name (%s) for request name (%s)",
|
||||
vid.VolumeID, vid.FsSubvolName, volOptions.RequestName)
|
||||
|
||||
return &vid, nil
|
||||
@ -311,7 +312,7 @@ func reserveSnap(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
util.DebugLog(ctx, "Generated Snapshot ID (%s) for request name (%s)",
|
||||
log.DebugLog(ctx, "Generated Snapshot ID (%s) for request name (%s)",
|
||||
vid.SnapshotID, snap.RequestName)
|
||||
|
||||
return &vid, nil
|
||||
@ -392,14 +393,14 @@ func checkSnapExists(
|
||||
if err != nil {
|
||||
err = volOptions.deleteSnapshot(ctx, volumeID(snapID), volumeID(parentSubVolName))
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to delete snapshot %s: %v", snapID, err)
|
||||
log.ErrorLog(ctx, "failed to delete snapshot %s: %v", snapID, err)
|
||||
|
||||
return
|
||||
}
|
||||
err = j.UndoReservation(ctx, volOptions.MetadataPool,
|
||||
volOptions.MetadataPool, snapID, snap.RequestName)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "removing reservation failed for snapshot %s: %v", snapID, err)
|
||||
log.ErrorLog(ctx, "removing reservation failed for snapshot %s: %v", snapID, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
@ -415,7 +416,7 @@ func checkSnapExists(
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
util.DebugLog(ctx, "Found existing snapshot (%s) with subvolume name (%s) for request (%s)",
|
||||
log.DebugLog(ctx, "Found existing snapshot (%s) with subvolume name (%s) for request (%s)",
|
||||
snapData.ImageAttributes.RequestName, parentSubVolName, sid.FsSnapshotName)
|
||||
|
||||
return sid, &snapInfo, nil
|
||||
|
@ -25,6 +25,7 @@ import (
|
||||
|
||||
csicommon "github.com/ceph/ceph-csi/internal/csi-common"
|
||||
"github.com/ceph/ceph-csi/internal/util"
|
||||
"github.com/ceph/ceph-csi/internal/util/log"
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"google.golang.org/grpc/codes"
|
||||
@ -81,7 +82,7 @@ func (ns *NodeServer) NodeStageVolume(
|
||||
volID := volumeID(req.GetVolumeId())
|
||||
|
||||
if acquired := ns.VolumeLocks.TryAcquire(req.GetVolumeId()); !acquired {
|
||||
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
|
||||
log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
|
||||
|
||||
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, req.GetVolumeId())
|
||||
}
|
||||
@ -114,13 +115,13 @@ func (ns *NodeServer) NodeStageVolume(
|
||||
|
||||
isMnt, err := util.IsMountPoint(stagingTargetPath)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "stat failed: %v", err)
|
||||
log.ErrorLog(ctx, "stat failed: %v", err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
if isMnt {
|
||||
util.DebugLog(ctx, "cephfs: volume %s is already mounted to %s, skipping", volID, stagingTargetPath)
|
||||
log.DebugLog(ctx, "cephfs: volume %s is already mounted to %s, skipping", volID, stagingTargetPath)
|
||||
|
||||
return &csi.NodeStageVolumeResponse{}, nil
|
||||
}
|
||||
@ -130,7 +131,7 @@ func (ns *NodeServer) NodeStageVolume(
|
||||
return nil, err
|
||||
}
|
||||
|
||||
util.DebugLog(ctx, "cephfs: successfully mounted volume %s to %s", volID, stagingTargetPath)
|
||||
log.DebugLog(ctx, "cephfs: successfully mounted volume %s to %s", volID, stagingTargetPath)
|
||||
|
||||
return &csi.NodeStageVolumeResponse{}, nil
|
||||
}
|
||||
@ -141,7 +142,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
|
||||
|
||||
cr, err := getCredentialsForVolume(volOptions, req)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to get ceph credentials for volume %s: %v", volID, err)
|
||||
log.ErrorLog(ctx, "failed to get ceph credentials for volume %s: %v", volID, err)
|
||||
|
||||
return status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
@ -149,12 +150,12 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
|
||||
|
||||
m, err := newMounter(volOptions)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to create mounter for volume %s: %v", volID, err)
|
||||
log.ErrorLog(ctx, "failed to create mounter for volume %s: %v", volID, err)
|
||||
|
||||
return status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
util.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, m.name())
|
||||
log.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, m.name())
|
||||
|
||||
readOnly := "ro"
|
||||
fuseMountOptions := strings.Split(volOptions.FuseMountOptions, ",")
|
||||
@ -177,7 +178,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
|
||||
}
|
||||
|
||||
if err = m.mount(ctx, stagingTargetPath, cr, volOptions); err != nil {
|
||||
util.ErrorLog(ctx,
|
||||
log.ErrorLog(ctx,
|
||||
"failed to mount volume %s: %v Check dmesg logs if required.",
|
||||
volID,
|
||||
err)
|
||||
@ -189,7 +190,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
|
||||
// #nosec - allow anyone to write inside the stagingtarget path
|
||||
err = os.Chmod(stagingTargetPath, 0o777)
|
||||
if err != nil {
|
||||
util.ErrorLog(
|
||||
log.ErrorLog(
|
||||
ctx,
|
||||
"failed to change stagingtarget path %s permission for volume %s: %v",
|
||||
stagingTargetPath,
|
||||
@ -197,7 +198,7 @@ func (*NodeServer) mount(ctx context.Context, volOptions *volumeOptions, req *cs
|
||||
err)
|
||||
uErr := unmountVolume(ctx, stagingTargetPath)
|
||||
if uErr != nil {
|
||||
util.ErrorLog(
|
||||
log.ErrorLog(
|
||||
ctx,
|
||||
"failed to umount stagingtarget path %s for volume %s: %v",
|
||||
stagingTargetPath,
|
||||
@ -229,7 +230,7 @@ func (ns *NodeServer) NodePublishVolume(
|
||||
// are serialized, we dont need any extra locking in nodePublish
|
||||
|
||||
if err := util.CreateMountPoint(targetPath); err != nil {
|
||||
util.ErrorLog(ctx, "failed to create mount point at %s: %v", targetPath, err)
|
||||
log.ErrorLog(ctx, "failed to create mount point at %s: %v", targetPath, err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
@ -244,13 +245,13 @@ func (ns *NodeServer) NodePublishVolume(
|
||||
|
||||
isMnt, err := util.IsMountPoint(targetPath)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "stat failed: %v", err)
|
||||
log.ErrorLog(ctx, "stat failed: %v", err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
if isMnt {
|
||||
util.DebugLog(ctx, "cephfs: volume %s is already bind-mounted to %s", volID, targetPath)
|
||||
log.DebugLog(ctx, "cephfs: volume %s is already bind-mounted to %s", volID, targetPath)
|
||||
|
||||
return &csi.NodePublishVolumeResponse{}, nil
|
||||
}
|
||||
@ -258,12 +259,12 @@ func (ns *NodeServer) NodePublishVolume(
|
||||
// It's not, mount now
|
||||
|
||||
if err = bindMount(ctx, req.GetStagingTargetPath(), req.GetTargetPath(), req.GetReadonly(), mountOptions); err != nil {
|
||||
util.ErrorLog(ctx, "failed to bind-mount volume %s: %v", volID, err)
|
||||
log.ErrorLog(ctx, "failed to bind-mount volume %s: %v", volID, err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
util.DebugLog(ctx, "cephfs: successfully bind-mounted volume %s to %s", volID, targetPath)
|
||||
log.DebugLog(ctx, "cephfs: successfully bind-mounted volume %s to %s", volID, targetPath)
|
||||
|
||||
return &csi.NodePublishVolumeResponse{}, nil
|
||||
}
|
||||
@ -283,7 +284,7 @@ func (ns *NodeServer) NodeUnpublishVolume(
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
// targetPath has already been deleted
|
||||
util.DebugLog(ctx, "targetPath: %s has already been deleted", targetPath)
|
||||
log.DebugLog(ctx, "targetPath: %s has already been deleted", targetPath)
|
||||
|
||||
return &csi.NodeUnpublishVolumeResponse{}, nil
|
||||
}
|
||||
@ -308,7 +309,7 @@ func (ns *NodeServer) NodeUnpublishVolume(
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
util.DebugLog(ctx, "cephfs: successfully unbounded volume %s from %s", req.GetVolumeId(), targetPath)
|
||||
log.DebugLog(ctx, "cephfs: successfully unbounded volume %s from %s", req.GetVolumeId(), targetPath)
|
||||
|
||||
return &csi.NodeUnpublishVolumeResponse{}, nil
|
||||
}
|
||||
@ -324,7 +325,7 @@ func (ns *NodeServer) NodeUnstageVolume(
|
||||
|
||||
volID := req.GetVolumeId()
|
||||
if acquired := ns.VolumeLocks.TryAcquire(volID); !acquired {
|
||||
util.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
|
||||
log.ErrorLog(ctx, util.VolumeOperationAlreadyExistsFmt, volID)
|
||||
|
||||
return nil, status.Errorf(codes.Aborted, util.VolumeOperationAlreadyExistsFmt, volID)
|
||||
}
|
||||
@ -336,7 +337,7 @@ func (ns *NodeServer) NodeUnstageVolume(
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
// targetPath has already been deleted
|
||||
util.DebugLog(ctx, "targetPath: %s has already been deleted", stagingTargetPath)
|
||||
log.DebugLog(ctx, "targetPath: %s has already been deleted", stagingTargetPath)
|
||||
|
||||
return &csi.NodeUnstageVolumeResponse{}, nil
|
||||
}
|
||||
@ -351,7 +352,7 @@ func (ns *NodeServer) NodeUnstageVolume(
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
util.DebugLog(ctx, "cephfs: successfully unmounted volume %s from %s", req.GetVolumeId(), stagingTargetPath)
|
||||
log.DebugLog(ctx, "cephfs: successfully unmounted volume %s from %s", req.GetVolumeId(), stagingTargetPath)
|
||||
|
||||
return &csi.NodeUnstageVolumeResponse{}, nil
|
||||
}
|
||||
|
@ -21,7 +21,7 @@ import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/ceph/ceph-csi/internal/util"
|
||||
"github.com/ceph/ceph-csi/internal/util/log"
|
||||
|
||||
"github.com/ceph/go-ceph/cephfs/admin"
|
||||
"github.com/ceph/go-ceph/rados"
|
||||
@ -51,14 +51,14 @@ type cephfsSnapshot struct {
|
||||
func (vo *volumeOptions) createSnapshot(ctx context.Context, snapID, volID volumeID) error {
|
||||
fsa, err := vo.conn.GetFSAdmin()
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "could not get FSAdmin: %s", err)
|
||||
log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
err = fsa.CreateSubVolumeSnapshot(vo.FsName, vo.SubvolumeGroup, string(volID), string(snapID))
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to create subvolume snapshot %s %s in fs %s: %s",
|
||||
log.ErrorLog(ctx, "failed to create subvolume snapshot %s %s in fs %s: %s",
|
||||
string(snapID), string(volID), vo.FsName, err)
|
||||
|
||||
return err
|
||||
@ -70,14 +70,14 @@ func (vo *volumeOptions) createSnapshot(ctx context.Context, snapID, volID volum
|
||||
func (vo *volumeOptions) deleteSnapshot(ctx context.Context, snapID, volID volumeID) error {
|
||||
fsa, err := vo.conn.GetFSAdmin()
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "could not get FSAdmin: %s", err)
|
||||
log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
err = fsa.ForceRemoveSubVolumeSnapshot(vo.FsName, vo.SubvolumeGroup, string(volID), string(snapID))
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to delete subvolume snapshot %s %s in fs %s: %s",
|
||||
log.ErrorLog(ctx, "failed to delete subvolume snapshot %s %s in fs %s: %s",
|
||||
string(snapID), string(volID), vo.FsName, err)
|
||||
|
||||
return err
|
||||
@ -97,7 +97,7 @@ func (vo *volumeOptions) getSnapshotInfo(ctx context.Context, snapID, volID volu
|
||||
snap := snapshotInfo{}
|
||||
fsa, err := vo.conn.GetFSAdmin()
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "could not get FSAdmin: %s", err)
|
||||
log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
|
||||
|
||||
return snap, err
|
||||
}
|
||||
@ -107,7 +107,7 @@ func (vo *volumeOptions) getSnapshotInfo(ctx context.Context, snapID, volID volu
|
||||
if errors.Is(err, rados.ErrNotFound) {
|
||||
return snap, ErrSnapNotFound
|
||||
}
|
||||
util.ErrorLog(
|
||||
log.ErrorLog(
|
||||
ctx,
|
||||
"failed to get subvolume snapshot info %s %s in fs %s with error %s",
|
||||
string(volID),
|
||||
@ -132,7 +132,7 @@ func (vo *volumeOptions) protectSnapshot(ctx context.Context, snapID, volID volu
|
||||
}
|
||||
fsa, err := vo.conn.GetFSAdmin()
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "could not get FSAdmin: %s", err)
|
||||
log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
|
||||
|
||||
return err
|
||||
}
|
||||
@ -143,7 +143,7 @@ func (vo *volumeOptions) protectSnapshot(ctx context.Context, snapID, volID volu
|
||||
if errors.Is(err, rados.ErrObjectExists) {
|
||||
return nil
|
||||
}
|
||||
util.ErrorLog(
|
||||
log.ErrorLog(
|
||||
ctx,
|
||||
"failed to protect subvolume snapshot %s %s in fs %s with error: %s",
|
||||
string(volID),
|
||||
@ -165,7 +165,7 @@ func (vo *volumeOptions) unprotectSnapshot(ctx context.Context, snapID, volID vo
|
||||
}
|
||||
fsa, err := vo.conn.GetFSAdmin()
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "could not get FSAdmin: %s", err)
|
||||
log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
|
||||
|
||||
return err
|
||||
}
|
||||
@ -178,7 +178,7 @@ func (vo *volumeOptions) unprotectSnapshot(ctx context.Context, snapID, volID vo
|
||||
if errors.Is(err, rados.ErrObjectExists) {
|
||||
return nil
|
||||
}
|
||||
util.ErrorLog(
|
||||
log.ErrorLog(
|
||||
ctx,
|
||||
"failed to unprotect subvolume snapshot %s %s in fs %s with error: %s",
|
||||
string(volID),
|
||||
@ -199,7 +199,7 @@ func (vo *volumeOptions) cloneSnapshot(
|
||||
) error {
|
||||
fsa, err := vo.conn.GetFSAdmin()
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "could not get FSAdmin: %s", err)
|
||||
log.ErrorLog(ctx, "could not get FSAdmin: %s", err)
|
||||
|
||||
return err
|
||||
}
|
||||
@ -212,7 +212,7 @@ func (vo *volumeOptions) cloneSnapshot(
|
||||
|
||||
err = fsa.CloneSubVolumeSnapshot(vo.FsName, vo.SubvolumeGroup, string(volID), string(snapID), string(cloneID), co)
|
||||
if err != nil {
|
||||
util.ErrorLog(
|
||||
log.ErrorLog(
|
||||
ctx,
|
||||
"failed to clone subvolume snapshot %s %s in fs %s with error: %s",
|
||||
string(volID),
|
||||
|
@ -22,6 +22,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/ceph/ceph-csi/internal/util"
|
||||
"github.com/ceph/ceph-csi/internal/util/log"
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"github.com/golang/protobuf/ptypes"
|
||||
@ -130,7 +131,7 @@ func genSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (sn
|
||||
|
||||
cephfsSnap.Monitors, cephfsSnap.ClusterID, err = util.GetMonsAndClusterID(snapOptions)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed getting mons (%s)", err)
|
||||
log.ErrorLog(ctx, "failed getting mons (%s)", err)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
@ -144,7 +145,7 @@ func genSnapFromOptions(ctx context.Context, req *csi.CreateSnapshotRequest) (sn
|
||||
func parseTime(ctx context.Context, createTime time.Time) (*timestamp.Timestamp, error) {
|
||||
tm, err := ptypes.TimestampProto(createTime)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to convert time %s %v", createTime, err)
|
||||
log.ErrorLog(ctx, "failed to convert time %s %v", createTime, err)
|
||||
|
||||
return tm, err
|
||||
}
|
||||
|
@ -24,6 +24,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/ceph/ceph-csi/internal/util"
|
||||
"github.com/ceph/ceph-csi/internal/util/log"
|
||||
|
||||
fsAdmin "github.com/ceph/go-ceph/cephfs/admin"
|
||||
"github.com/ceph/go-ceph/rados"
|
||||
@ -59,13 +60,13 @@ func getVolumeRootPathCephDeprecated(volID volumeID) string {
|
||||
func (vo *volumeOptions) getVolumeRootPathCeph(ctx context.Context, volID volumeID) (string, error) {
|
||||
fsa, err := vo.conn.GetFSAdmin()
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "could not get FSAdmin err %s", err)
|
||||
log.ErrorLog(ctx, "could not get FSAdmin err %s", err)
|
||||
|
||||
return "", err
|
||||
}
|
||||
svPath, err := fsa.SubVolumePath(vo.FsName, vo.SubvolumeGroup, string(volID))
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to get the rootpath for the vol %s: %s", string(volID), err)
|
||||
log.ErrorLog(ctx, "failed to get the rootpath for the vol %s: %s", string(volID), err)
|
||||
if errors.Is(err, rados.ErrNotFound) {
|
||||
return "", util.JoinErrors(ErrVolumeNotFound, err)
|
||||
}
|
||||
@ -79,14 +80,14 @@ func (vo *volumeOptions) getVolumeRootPathCeph(ctx context.Context, volID volume
|
||||
func (vo *volumeOptions) getSubVolumeInfo(ctx context.Context, volID volumeID) (*Subvolume, error) {
|
||||
fsa, err := vo.conn.GetFSAdmin()
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s:", vo.FsName, err)
|
||||
log.ErrorLog(ctx, "could not get FSAdmin, can not fetch metadata pool for %s:", vo.FsName, err)
|
||||
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info, err := fsa.SubVolumeInfo(vo.FsName, vo.SubvolumeGroup, string(volID))
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to get subvolume info for the vol %s: %s", string(volID), err)
|
||||
log.ErrorLog(ctx, "failed to get subvolume info for the vol %s: %s", string(volID), err)
|
||||
if errors.Is(err, rados.ErrNotFound) {
|
||||
return nil, ErrVolumeNotFound
|
||||
}
|
||||
@ -148,7 +149,7 @@ func createVolume(ctx context.Context, volOptions *volumeOptions, volID volumeID
|
||||
|
||||
ca, err := volOptions.conn.GetFSAdmin()
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "could not get FSAdmin, can not create subvolume %s: %s", string(volID), err)
|
||||
log.ErrorLog(ctx, "could not get FSAdmin, can not create subvolume %s: %s", string(volID), err)
|
||||
|
||||
return err
|
||||
}
|
||||
@ -158,7 +159,7 @@ func createVolume(ctx context.Context, volOptions *volumeOptions, volID volumeID
|
||||
opts := fsAdmin.SubVolumeGroupOptions{}
|
||||
err = ca.CreateSubVolumeGroup(volOptions.FsName, volOptions.SubvolumeGroup, &opts)
|
||||
if err != nil {
|
||||
util.ErrorLog(
|
||||
log.ErrorLog(
|
||||
ctx,
|
||||
"failed to create subvolume group %s, for the vol %s: %s",
|
||||
volOptions.SubvolumeGroup,
|
||||
@ -167,7 +168,7 @@ func createVolume(ctx context.Context, volOptions *volumeOptions, volID volumeID
|
||||
|
||||
return err
|
||||
}
|
||||
util.DebugLog(ctx, "cephfs: created subvolume group %s", volOptions.SubvolumeGroup)
|
||||
log.DebugLog(ctx, "cephfs: created subvolume group %s", volOptions.SubvolumeGroup)
|
||||
clusterAdditionalInfo[volOptions.ClusterID].subVolumeGroupCreated = true
|
||||
}
|
||||
|
||||
@ -182,7 +183,7 @@ func createVolume(ctx context.Context, volOptions *volumeOptions, volID volumeID
|
||||
// FIXME: check if the right credentials are used ("-n", cephEntityClientPrefix + cr.ID)
|
||||
err = ca.CreateSubVolume(volOptions.FsName, volOptions.SubvolumeGroup, string(volID), &opts)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to create subvolume %s in fs %s: %s", string(volID), volOptions.FsName, err)
|
||||
log.ErrorLog(ctx, "failed to create subvolume %s in fs %s: %s", string(volID), volOptions.FsName, err)
|
||||
|
||||
return err
|
||||
}
|
||||
@ -207,7 +208,7 @@ func (vo *volumeOptions) resizeVolume(ctx context.Context, volID volumeID, bytes
|
||||
clusterAdditionalInfo[vo.ClusterID].resizeState == supported {
|
||||
fsa, err := vo.conn.GetFSAdmin()
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "could not get FSAdmin, can not resize volume %s:", vo.FsName, err)
|
||||
log.ErrorLog(ctx, "could not get FSAdmin, can not resize volume %s:", vo.FsName, err)
|
||||
|
||||
return err
|
||||
}
|
||||
@ -220,7 +221,7 @@ func (vo *volumeOptions) resizeVolume(ctx context.Context, volID volumeID, bytes
|
||||
var invalid fsAdmin.NotImplementedError
|
||||
// In case the error is other than invalid command return error to the caller.
|
||||
if !errors.As(err, &invalid) {
|
||||
util.ErrorLog(ctx, "failed to resize subvolume %s in fs %s: %s", string(volID), vo.FsName, err)
|
||||
log.ErrorLog(ctx, "failed to resize subvolume %s in fs %s: %s", string(volID), vo.FsName, err)
|
||||
|
||||
return err
|
||||
}
|
||||
@ -233,7 +234,7 @@ func (vo *volumeOptions) resizeVolume(ctx context.Context, volID volumeID, bytes
|
||||
func (vo *volumeOptions) purgeVolume(ctx context.Context, volID volumeID, force bool) error {
|
||||
fsa, err := vo.conn.GetFSAdmin()
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "could not get FSAdmin %s:", err)
|
||||
log.ErrorLog(ctx, "could not get FSAdmin %s:", err)
|
||||
|
||||
return err
|
||||
}
|
||||
@ -247,7 +248,7 @@ func (vo *volumeOptions) purgeVolume(ctx context.Context, volID volumeID, force
|
||||
|
||||
err = fsa.RemoveSubVolumeWithFlags(vo.FsName, vo.SubvolumeGroup, string(volID), opt)
|
||||
if err != nil {
|
||||
util.ErrorLog(ctx, "failed to purge subvolume %s in fs %s: %s", string(volID), vo.FsName, err)
|
||||
log.ErrorLog(ctx, "failed to purge subvolume %s in fs %s: %s", string(volID), vo.FsName, err)
|
||||
if strings.Contains(err.Error(), volumeNotEmpty) {
|
||||
return util.JoinErrors(ErrVolumeHasSnapshots, err)
|
||||
}
|
||||
|
@ -28,6 +28,7 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/ceph/ceph-csi/internal/util"
|
||||
"github.com/ceph/ceph-csi/internal/util/log"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -75,7 +76,7 @@ func loadAvailableMounters(conf *util.Config) error {
|
||||
|
||||
err := kernelMounterProbe.Run()
|
||||
if err != nil {
|
||||
util.ErrorLogMsg("failed to run mount.ceph %v", err)
|
||||
log.ErrorLogMsg("failed to run mount.ceph %v", err)
|
||||
} else {
|
||||
// fetch the current running kernel info
|
||||
release, kvErr := util.GetKernelVersion()
|
||||
@ -84,18 +85,18 @@ func loadAvailableMounters(conf *util.Config) error {
|
||||
}
|
||||
|
||||
if conf.ForceKernelCephFS || util.CheckKernelSupport(release, quotaSupport) {
|
||||
util.DefaultLog("loaded mounter: %s", volumeMounterKernel)
|
||||
log.DefaultLog("loaded mounter: %s", volumeMounterKernel)
|
||||
availableMounters = append(availableMounters, volumeMounterKernel)
|
||||
} else {
|
||||
util.DefaultLog("kernel version < 4.17 might not support quota feature, hence not loading kernel client")
|
||||
log.DefaultLog("kernel version < 4.17 might not support quota feature, hence not loading kernel client")
|
||||
}
|
||||
}
|
||||
|
||||
err = fuseMounterProbe.Run()
|
||||
if err != nil {
|
||||
util.ErrorLogMsg("failed to run ceph-fuse %v", err)
|
||||
log.ErrorLogMsg("failed to run ceph-fuse %v", err)
|
||||
} else {
|
||||
util.DefaultLog("loaded mounter: %s", volumeMounterFuse)
|
||||
log.DefaultLog("loaded mounter: %s", volumeMounterFuse)
|
||||
availableMounters = append(availableMounters, volumeMounterFuse)
|
||||
}
|
||||
|
||||
@ -131,7 +132,7 @@ func newMounter(volOptions *volumeOptions) (volumeMounter, error) {
|
||||
if chosenMounter == "" {
|
||||
// Otherwise pick whatever is left
|
||||
chosenMounter = availableMounters[0]
|
||||
util.DebugLogMsg("requested mounter: %s, chosen mounter: %s", wantMounter, chosenMounter)
|
||||
log.DebugLogMsg("requested mounter: %s, chosen mounter: %s", wantMounter, chosenMounter)
|
||||
}
|
||||
|
||||
// Create the mounter
|
||||
@ -291,10 +292,10 @@ func unmountVolume(ctx context.Context, mountPoint string) error {
|
||||
if ok {
|
||||
p, err := os.FindProcess(pid)
|
||||
if err != nil {
|
||||
util.WarningLog(ctx, "failed to find process %d: %v", pid, err)
|
||||
log.WarningLog(ctx, "failed to find process %d: %v", pid, err)
|
||||
} else {
|
||||
if _, err = p.Wait(); err != nil {
|
||||
util.WarningLog(ctx, "%d is not a child process: %v", pid, err)
|
||||
log.WarningLog(ctx, "%d is not a child process: %v", pid, err)
|
||||
}
|
||||
}
|
||||
}