mirror of https://github.com/ceph/ceph-csi.git
cleanup: incorrect fuserecovery logging
Signed-off-by: Praveen M <m.praveen@ibm.com>
parent 86bc40fbff
commit 0e61b826ea

This change moves the FuseMounter type check out of the fuse-recovery helpers
and into their callers, NodeStageVolume and NodePublishVolume, so volumes that
use the kernel mounter never enter the FUSE recovery path and no longer
produce the misleading "cannot proceed with mount recovery" warnings. The
mountinfo helpers that existed only for that check are removed, and
VolumeOptions gains DetectMounter so NodePublishVolume can construct the
mounter up front.
@@ -24,8 +24,6 @@ import (
     fsutil "github.com/ceph/ceph-csi/internal/cephfs/util"
     "github.com/ceph/ceph-csi/internal/util"
     "github.com/ceph/ceph-csi/internal/util/log"
-
-    mountutil "k8s.io/mount-utils"
 )
 
 type (
@@ -37,9 +35,6 @@ const (
     msNotMounted
     msMounted
     msCorrupted
-
-    // ceph-fuse fsType in /proc/<PID>/mountinfo.
-    cephFuseFsType = "fuse.ceph-fuse"
 )
 
 func (ms mountState) String() string {
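For readers unfamiliar with the pattern, the mount-state enum can be
reconstructed standalone. In the sketch below the three names shown in the
hunk are real; the first value and the String() strings are guesses, since
the top of the const block lies outside the hunk.

package main

import "fmt"

// mountState mirrors the iota enum in the hunk above; msUnknown and the
// display strings are invented for illustration.
type mountState int

const (
    msUnknown mountState = iota
    msNotMounted
    msMounted
    msCorrupted
)

func (ms mountState) String() string {
    return [...]string{"unknown", "not-mounted", "mounted", "corrupted"}[ms]
}

func main() {
    fmt.Println(msCorrupted) // prints: corrupted
}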
@@ -68,30 +63,6 @@ func (ns *NodeServer) getMountState(path string) (mountState, error) {
     return msNotMounted, nil
 }
 
-func findMountinfo(mountpoint string, minfo []mountutil.MountInfo) int {
-    for i := range minfo {
-        if minfo[i].MountPoint == mountpoint {
-            return i
-        }
-    }
-
-    return -1
-}
-
-// Ensures that given mountpoint is of specified fstype.
-// Returns true if fstype matches, or if no such mountpoint exists.
-func validateFsType(mountpoint, fsType string, minfo []mountutil.MountInfo) bool {
-    if idx := findMountinfo(mountpoint, minfo); idx > 0 {
-        mi := minfo[idx]
-
-        if mi.FsType != fsType {
-            return false
-        }
-    }
-
-    return true
-}
-
 // tryRestoreFuseMountsInNodePublish tries to restore staging and publish
 // volume mountpoints inside the NodePublishVolume call.
 //
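The two deleted helpers are self-contained enough to exercise outside the
driver. The sketch below keeps their shape but checks idx >= 0, since
findMountinfo returns -1 for a miss and index 0 is a valid hit (the deleted
code's idx > 0 comparison would have skipped it). mountutil.ParseMountInfo
and the MountPoint/FsType fields are the same k8s.io/mount-utils APIs the
deleted code relied on; the path in main is a placeholder.

package main

import (
    "fmt"

    mountutil "k8s.io/mount-utils"
)

// findMountinfo returns the index of mountpoint in minfo, or -1 if absent.
func findMountinfo(mountpoint string, minfo []mountutil.MountInfo) int {
    for i := range minfo {
        if minfo[i].MountPoint == mountpoint {
            return i
        }
    }

    return -1
}

// validateFsType reports whether mountpoint has the given fstype; a missing
// mountpoint counts as valid, mirroring the deleted helper's contract.
func validateFsType(mountpoint, fsType string, minfo []mountutil.MountInfo) bool {
    if idx := findMountinfo(mountpoint, minfo); idx >= 0 {
        return minfo[idx].FsType == fsType
    }

    return true
}

func main() {
    minfo, err := mountutil.ParseMountInfo("/proc/self/mountinfo")
    if err != nil {
        panic(err)
    }

    // "/mnt/cephfs" is a placeholder path, not one from the commit.
    fmt.Println(validateFsType("/mnt/cephfs", "fuse.ceph-fuse", minfo))
}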
@@ -158,19 +129,6 @@ func (ns *NodeServer) tryRestoreFuseMountsInNodePublish(
         volOptions *store.VolumeOptions
     )
 
-    procMountInfo, err := util.ReadMountInfoForProc("self")
-    if err != nil {
-        return err
-    }
-
-    if !validateFsType(stagingTargetPath, cephFuseFsType, procMountInfo) ||
-        !validateFsType(targetPath, cephFuseFsType, procMountInfo) {
-        // We can't restore mounts not managed by ceph-fuse.
-        log.WarningLog(ctx, "cephfs: cannot proceed with mount recovery on non-FUSE mountpoints")
-
-        return nil
-    }
-
     volOptions, err = ns.getVolumeOptions(ctx, volID, volContext, nsMountinfo.Secrets)
     if err != nil {
         return err
@@ -181,13 +139,6 @@ func (ns *NodeServer) tryRestoreFuseMountsInNodePublish(
         return err
     }
 
-    if _, ok := volMounter.(*mounter.FuseMounter); !ok {
-        // We can't restore mounts with non-FUSE mounter.
-        log.WarningLog(ctx, "cephfs: cannot proceed with mount recovery with non-FUSE mounter")
-
-        return nil
-    }
-
     // Try to restore mount in staging target path.
     // Unmount and mount the volume.
 
@@ -225,7 +176,6 @@
 // should be able to continue with mounting the volume normally afterwards.
 func (ns *NodeServer) tryRestoreFuseMountInNodeStage(
     ctx context.Context,
-    mnt mounter.VolumeMounter,
     stagingTargetPath string,
 ) error {
     // Check if there is anything to restore.
@@ -245,28 +195,6 @@ func (ns *NodeServer) tryRestoreFuseMountInNodeStage(
     log.WarningLog(ctx, "cephfs: mountpoint problem detected when staging a volume: %s is %s; attempting recovery",
         stagingTargetPath, stagingTargetMs)
 
-    // Check that the existing stage mount for this volume is managed by
-    // ceph-fuse, and that the mounter is FuseMounter. Then try to restore them.
-
-    procMountInfo, err := util.ReadMountInfoForProc("self")
-    if err != nil {
-        return err
-    }
-
-    if !validateFsType(stagingTargetPath, cephFuseFsType, procMountInfo) {
-        // We can't restore mounts not managed by ceph-fuse.
-        log.WarningLog(ctx, "cephfs: cannot proceed with mount recovery on non-FUSE mountpoints")
-
-        return nil
-    }
-
-    if _, ok := mnt.(*mounter.FuseMounter); !ok {
-        // We can't restore mounts with non-FUSE mounter.
-        log.WarningLog(ctx, "cephfs: cannot proceed with mount recovery with non-FUSE mounter")
-
-        return nil
-    }
-
     // Restoration here means only unmounting the volume.
     // NodeStageVolume should take care of the rest.
     return mounter.UnmountAll(ctx, stagingTargetPath)
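Restoration in the staging path is only an unmount; ceph-csi's
mounter.UnmountAll is internal, but plain k8s.io/mount-utils can express the
same idea. A rough sketch follows, with a placeholder path and the caveat
that CleanupMountPoint also removes the directory after unmounting, which a
bare unmount would not.

package main

import (
    "log"

    mountutil "k8s.io/mount-utils"
)

// recoverStagingPath unmounts a possibly corrupted mountpoint so the next
// NodeStageVolume call can remount it cleanly.
func recoverStagingPath(stagingTargetPath string) error {
    mounter := mountutil.New("") // default mount(8)-backed implementation

    // Extensive checking disabled: stat() on a corrupted FUSE mount fails.
    return mountutil.CleanupMountPoint(stagingTargetPath, mounter, false)
}

func main() {
    // Placeholder path, not one from the commit.
    if err := recoverStagingPath("/var/lib/kubelet/example-staging-path"); err != nil {
        log.Fatal(err)
    }
}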
@@ -212,8 +212,10 @@ func (ns *NodeServer) NodeStageVolume(
 
     // Check if the volume is already mounted
 
-    if err = ns.tryRestoreFuseMountInNodeStage(ctx, mnt, stagingTargetPath); err != nil {
-        return nil, status.Errorf(codes.Internal, "failed to try to restore FUSE mounts: %v", err)
+    if _, ok := mnt.(*mounter.FuseMounter); ok {
+        if err = ns.tryRestoreFuseMountInNodeStage(ctx, stagingTargetPath); err != nil {
+            return nil, status.Errorf(codes.Internal, "failed to try to restore FUSE mounts: %v", err)
+        }
     }
 
     isMnt, err := util.IsMountPoint(ns.Mounter, stagingTargetPath)
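The caller-side gate above is the heart of the commit: recovery, and its
warnings, now run only when the concrete mounter is a FuseMounter. A minimal
illustration of the type-assertion pattern, with invented stand-ins for
ceph-csi's mounter types:

package main

import "fmt"

// VolumeMounter, FuseMounter and KernelMounter mirror the names in the diff,
// but these bodies are invented stand-ins.
type VolumeMounter interface {
    Name() string
}

type FuseMounter struct{}

func (*FuseMounter) Name() string { return "fuse" }

type KernelMounter struct{}

func (*KernelMounter) Name() string { return "kernel" }

func stage(mnt VolumeMounter) {
    // Gate recovery on the concrete type, so kernel mounts never reach the
    // FUSE-specific path or its warning logs.
    if _, ok := mnt.(*FuseMounter); ok {
        fmt.Println("attempting FUSE mount recovery")

        return
    }
    fmt.Println("kernel mount: recovery skipped, no spurious warning")
}

func main() {
    stage(&FuseMounter{})
    stage(&KernelMounter{})
}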
@@ -446,23 +448,37 @@ func (ns *NodeServer) NodePublishVolume(
     targetPath := req.GetTargetPath()
     volID := fsutil.VolumeID(req.GetVolumeId())
 
+    volOptions := &store.VolumeOptions{}
+    defer volOptions.Destroy()
+
+    if err := volOptions.DetectMounter(req.GetVolumeContext()); err != nil {
+        return nil, status.Errorf(codes.Internal, "failed to detect mounter for volume %s: %v", volID, err.Error())
+    }
+
+    volMounter, err := mounter.New(volOptions)
+    if err != nil {
+        return nil, status.Errorf(codes.Internal, "failed to create mounter for volume %s: %v", volID, err.Error())
+    }
+
     // Considering that kubelet makes sure the stage and publish operations
     // are serialized, we don't need any extra locking in nodePublish
 
-    if err := util.CreateMountPoint(targetPath); err != nil {
+    if err = util.CreateMountPoint(targetPath); err != nil {
         log.ErrorLog(ctx, "failed to create mount point at %s: %v", targetPath, err)
 
         return nil, status.Error(codes.Internal, err.Error())
     }
 
-    if err := ns.tryRestoreFuseMountsInNodePublish(
-        ctx,
-        volID,
-        stagingTargetPath,
-        targetPath,
-        req.GetVolumeContext(),
-    ); err != nil {
-        return nil, status.Errorf(codes.Internal, "failed to try to restore FUSE mounts: %v", err)
+    if _, ok := volMounter.(*mounter.FuseMounter); ok {
+        if err = ns.tryRestoreFuseMountsInNodePublish(
+            ctx,
+            volID,
+            stagingTargetPath,
+            targetPath,
+            req.GetVolumeContext(),
+        ); err != nil {
+            return nil, status.Errorf(codes.Internal, "failed to try to restore FUSE mounts: %v", err)
+        }
     }
 
     if req.GetReadonly() {
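Each failure in this handler is returned as a gRPC status so kubelet's CSI
client can classify the error. A minimal, self-contained illustration of the
status.Errorf/FromError round trip, with a made-up failing step standing in
for util.CreateMountPoint:

package main

import (
    "errors"
    "fmt"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// createMountPoint is an invented stand-in that always fails.
func createMountPoint() error { return errors.New("mount point busy") }

// publish mirrors the handler's shape: internal failures become
// codes.Internal statuses for the container orchestrator to inspect.
func publish() error {
    if err := createMountPoint(); err != nil {
        return status.Errorf(codes.Internal, "failed to create mount point: %v", err)
    }

    return nil
}

func main() {
    if err := publish(); err != nil {
        s, _ := status.FromError(err)
        fmt.Println(s.Code(), s.Message())
    }
}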
@@ -151,6 +151,10 @@ func validateMounter(m string) error {
     return nil
 }
 
+func (v *VolumeOptions) DetectMounter(options map[string]string) error {
+    return extractMounter(&v.Mounter, options)
+}
+
 func extractMounter(dest *string, options map[string]string) error {
     if err := extractOptionalOption(dest, "mounter", options); err != nil {
         return err
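extractOptionalOption itself lies outside the hunk. A plausible
reconstruction of its shape, inferred only from this call site (the real
ceph-csi helper may validate or report errors differently), writes through
the pointer only when the key is present, so an absent "mounter" key keeps
the default:

package main

import "fmt"

// extractOptionalOption is a guess at the helper's shape based on its call
// site: copy options[key] into *dest when present, succeed silently when not.
func extractOptionalOption(dest *string, key string, options map[string]string) error {
    if v, ok := options[key]; ok {
        *dest = v
    }

    return nil
}

// extractMounter mirrors the function in the hunk, minus the validateMounter
// step, which this sketch omits.
func extractMounter(dest *string, options map[string]string) error {
    return extractOptionalOption(dest, "mounter", options)
}

func main() {
    mounter := "kernel" // default survives when the key is absent
    _ = extractMounter(&mounter, map[string]string{})
    fmt.Println(mounter) // kernel

    _ = extractMounter(&mounter, map[string]string{"mounter": "fuse"})
    fmt.Println(mounter) // fuse
}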
@@ -340,12 +340,6 @@ func IsCorruptedMountError(err error) bool {
     return mount.IsCorruptedMnt(err)
 }
 
-// ReadMountInfoForProc reads /proc/<PID>/mountinfo and parses it into
-// MountInfo structs.
-func ReadMountInfoForProc(proc string) ([]mount.MountInfo, error) {
-    return mount.ParseMountInfo(fmt.Sprintf("/proc/%s/mountinfo", proc))
-}
-
 // Mount mounts the source to the target path.
 func Mount(mounter mount.Interface, source, target, fstype string, options []string) error {
     return mounter.MountSensitiveWithoutSystemd(source, target, fstype, options, nil)
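The surviving util.Mount wrapper delegates to k8s.io/mount-utils. A
standalone usage sketch follows; the endpoints are placeholders and an
actual mount requires root privileges.

package main

import (
    "log"

    mountutil "k8s.io/mount-utils"
)

// Mount reproduces the wrapper from the hunk: systemd-less mounting with no
// sensitive options passed.
func Mount(mounter mountutil.Interface, source, target, fstype string, options []string) error {
    return mounter.MountSensitiveWithoutSystemd(source, target, fstype, options, nil)
}

func main() {
    mounter := mountutil.New("") // uses the mount(8) binary found in PATH

    // Placeholder endpoints; a real cephfs kernel mount would use
    // "<mon-host>:/" as the source and "ceph" as the fstype.
    if err := Mount(mounter, "tmpfs", "/mnt/example", "tmpfs", []string{"rw"}); err != nil {
        log.Fatal(err)
    }
}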