mirror of
https://github.com/ceph/ceph-csi.git
synced 2024-11-17 20:00:23 +00:00
cleanup: create k8s.io/mount-utils Mounter only once
Recently the k8s.io/mount-utils package added more runtime detection. When creating a new Mounter, the detection is run every time. This is unfortunate, as it logs a message like the following: ``` mount_linux.go:283] Detected umount with safe 'not mounted' behavior ``` This message might be useful, so it is probably good to keep it. In Ceph-CSI there are various locations where Mounter instances are created. Moving that to the DefaultNodeServer type reduces it to a single place. Some utility functions need to accept the additional parameter too, so that has been modified as well. See-also: kubernetes/kubernetes#109676 Signed-off-by: Niels de Vos <ndevos@redhat.com>
This commit is contained in:
parent
5ed305850f
commit
011d4fc81c
@ -51,8 +51,8 @@ func (ms mountState) String() string {
|
||||
}[int(ms)]
|
||||
}
|
||||
|
||||
func getMountState(path string) (mountState, error) {
|
||||
isMnt, err := util.IsMountPoint(path)
|
||||
func (ns *NodeServer) getMountState(path string) (mountState, error) {
|
||||
isMnt, err := util.IsMountPoint(ns.Mounter, path)
|
||||
if err != nil {
|
||||
if util.IsCorruptedMountError(err) {
|
||||
return msCorrupted, nil
|
||||
@ -117,12 +117,12 @@ func (ns *NodeServer) tryRestoreFuseMountsInNodePublish(
|
||||
) error {
|
||||
// Check if there is anything to restore.
|
||||
|
||||
stagingTargetMs, err := getMountState(stagingTargetPath)
|
||||
stagingTargetMs, err := ns.getMountState(stagingTargetPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
targetMs, err := getMountState(targetPath)
|
||||
targetMs, err := ns.getMountState(targetPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -230,7 +230,7 @@ func (ns *NodeServer) tryRestoreFuseMountInNodeStage(
|
||||
) error {
|
||||
// Check if there is anything to restore.
|
||||
|
||||
stagingTargetMs, err := getMountState(stagingTargetPath)
|
||||
stagingTargetMs, err := ns.getMountState(stagingTargetPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -176,7 +176,7 @@ func (ns *NodeServer) NodeStageVolume(
|
||||
return nil, status.Errorf(codes.Internal, "failed to try to restore FUSE mounts: %v", err)
|
||||
}
|
||||
|
||||
isMnt, err := util.IsMountPoint(stagingTargetPath)
|
||||
isMnt, err := util.IsMountPoint(ns.Mounter, stagingTargetPath)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "stat failed: %v", err)
|
||||
|
||||
@ -426,7 +426,7 @@ func (ns *NodeServer) NodePublishVolume(
|
||||
|
||||
// Ensure staging target path is a mountpoint.
|
||||
|
||||
if isMnt, err := util.IsMountPoint(stagingTargetPath); err != nil {
|
||||
if isMnt, err := util.IsMountPoint(ns.Mounter, stagingTargetPath); err != nil {
|
||||
log.ErrorLog(ctx, "stat failed: %v", err)
|
||||
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
@ -438,7 +438,7 @@ func (ns *NodeServer) NodePublishVolume(
|
||||
|
||||
// Check if the volume is already mounted
|
||||
|
||||
isMnt, err := util.IsMountPoint(targetPath)
|
||||
isMnt, err := util.IsMountPoint(ns.Mounter, targetPath)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "stat failed: %v", err)
|
||||
|
||||
@ -482,7 +482,7 @@ func (ns *NodeServer) NodeUnpublishVolume(
|
||||
// considering kubelet make sure node operations like unpublish/unstage...etc can not be called
|
||||
// at same time, an explicit locking at time of nodeunpublish is not required.
|
||||
targetPath := req.GetTargetPath()
|
||||
isMnt, err := util.IsMountPoint(targetPath)
|
||||
isMnt, err := util.IsMountPoint(ns.Mounter, targetPath)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "stat failed: %v", err)
|
||||
|
||||
@ -551,7 +551,7 @@ func (ns *NodeServer) NodeUnstageVolume(
|
||||
return nil, status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
isMnt, err := util.IsMountPoint(stagingTargetPath)
|
||||
isMnt, err := util.IsMountPoint(ns.Mounter, stagingTargetPath)
|
||||
if err != nil {
|
||||
log.ErrorLog(ctx, "stat failed: %v", err)
|
||||
|
||||
@ -637,7 +637,7 @@ func (ns *NodeServer) NodeGetVolumeStats(
|
||||
}
|
||||
|
||||
if stat.Mode().IsDir() {
|
||||
return csicommon.FilesystemNodeGetVolumeStats(ctx, targetPath)
|
||||
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath)
|
||||
}
|
||||
|
||||
return nil, status.Errorf(codes.InvalidArgument, "targetpath %q is not a directory or device", targetPath)
|
||||
|
@ -24,12 +24,14 @@ import (
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
mount "k8s.io/mount-utils"
|
||||
)
|
||||
|
||||
// DefaultNodeServer stores driver object.
|
||||
type DefaultNodeServer struct {
|
||||
Driver *CSIDriver
|
||||
Type string
|
||||
Driver *CSIDriver
|
||||
Type string
|
||||
Mounter mount.Interface
|
||||
}
|
||||
|
||||
// NodeExpandVolume returns unimplemented response.
|
||||
|
@ -38,6 +38,7 @@ import (
|
||||
"google.golang.org/grpc/status"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
mount "k8s.io/mount-utils"
|
||||
)
|
||||
|
||||
func parseEndpoint(ep string) (string, string, error) {
|
||||
@ -61,8 +62,9 @@ func NewDefaultNodeServer(d *CSIDriver, t string, topology map[string]string) *D
|
||||
d.topology = topology
|
||||
|
||||
return &DefaultNodeServer{
|
||||
Driver: d,
|
||||
Type: t,
|
||||
Driver: d,
|
||||
Type: t,
|
||||
Mounter: mount.New(""),
|
||||
}
|
||||
}
|
||||
|
||||
@ -229,8 +231,12 @@ func panicHandler(
|
||||
// requested by the NodeGetVolumeStats CSI procedure.
|
||||
// It is shared for FileMode volumes, both the CephFS and RBD NodeServers call
|
||||
// this.
|
||||
func FilesystemNodeGetVolumeStats(ctx context.Context, targetPath string) (*csi.NodeGetVolumeStatsResponse, error) {
|
||||
isMnt, err := util.IsMountPoint(targetPath)
|
||||
func FilesystemNodeGetVolumeStats(
|
||||
ctx context.Context,
|
||||
mounter mount.Interface,
|
||||
targetPath string,
|
||||
) (*csi.NodeGetVolumeStatsResponse, error) {
|
||||
isMnt, err := util.IsMountPoint(mounter, targetPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, status.Errorf(codes.InvalidArgument, "targetpath %s does not exist", targetPath)
|
||||
|
@ -26,6 +26,7 @@ import (
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
mount "k8s.io/mount-utils"
|
||||
)
|
||||
|
||||
var fakeID = "fake-id"
|
||||
@ -87,7 +88,7 @@ func TestFilesystemNodeGetVolumeStats(t *testing.T) {
|
||||
|
||||
// retry until a mountpoint is found
|
||||
for {
|
||||
stats, err := FilesystemNodeGetVolumeStats(context.TODO(), cwd)
|
||||
stats, err := FilesystemNodeGetVolumeStats(context.TODO(), mount.New(""), cwd)
|
||||
if err != nil && cwd != "/" && strings.HasSuffix(err.Error(), "is not mounted") {
|
||||
// try again with the parent directory
|
||||
cwd = filepath.Dir(cwd)
|
||||
|
@ -29,7 +29,6 @@ import (
|
||||
"github.com/ceph/ceph-csi/internal/util/log"
|
||||
|
||||
"github.com/container-storage-interface/spec/lib/go/csi"
|
||||
mount "k8s.io/mount-utils"
|
||||
)
|
||||
|
||||
// Driver contains the default identity,node and controller struct.
|
||||
@ -73,11 +72,8 @@ func NewReplicationServer(c *rbd.ControllerServer) *rbd.ReplicationServer {
|
||||
|
||||
// NewNodeServer initialize a node server for rbd CSI driver.
|
||||
func NewNodeServer(d *csicommon.CSIDriver, t string, topology map[string]string) (*rbd.NodeServer, error) {
|
||||
mounter := mount.New("")
|
||||
|
||||
return &rbd.NodeServer{
|
||||
DefaultNodeServer: csicommon.NewDefaultNodeServer(d, t, topology),
|
||||
Mounter: mounter,
|
||||
VolumeLocks: util.NewVolumeLocks(),
|
||||
}, nil
|
||||
}
|
||||
|
@ -42,7 +42,6 @@ import (
|
||||
// node server spec.
|
||||
type NodeServer struct {
|
||||
*csicommon.DefaultNodeServer
|
||||
Mounter mount.Interface
|
||||
// A map storing all volumes with ongoing operations so that additional operations
|
||||
// for that same volume (as defined by VolumeID) return an Aborted error
|
||||
VolumeLocks *util.VolumeLocks
|
||||
@ -806,7 +805,7 @@ func (ns *NodeServer) mountVolume(ctx context.Context, stagingPath string, req *
|
||||
if readOnly {
|
||||
mountOptions = append(mountOptions, "ro")
|
||||
}
|
||||
if err := util.Mount(stagingPath, targetPath, fsType, mountOptions); err != nil {
|
||||
if err := util.Mount(ns.Mounter, stagingPath, targetPath, fsType, mountOptions); err != nil {
|
||||
return status.Error(codes.Internal, err.Error())
|
||||
}
|
||||
|
||||
@ -1241,7 +1240,7 @@ func (ns *NodeServer) NodeGetVolumeStats(
|
||||
}
|
||||
|
||||
if stat.Mode().IsDir() {
|
||||
return csicommon.FilesystemNodeGetVolumeStats(ctx, targetPath)
|
||||
return csicommon.FilesystemNodeGetVolumeStats(ctx, ns.Mounter, targetPath)
|
||||
} else if (stat.Mode() & os.ModeDevice) == os.ModeDevice {
|
||||
return blockNodeGetVolumeStats(ctx, targetPath)
|
||||
}
|
||||
|
@ -325,9 +325,8 @@ func checkDirExists(p string) bool {
|
||||
}
|
||||
|
||||
// IsMountPoint checks if the given path is mountpoint or not.
|
||||
func IsMountPoint(p string) (bool, error) {
|
||||
dummyMount := mount.New("")
|
||||
notMnt, err := dummyMount.IsLikelyNotMountPoint(p)
|
||||
func IsMountPoint(mounter mount.Interface, p string) (bool, error) {
|
||||
notMnt, err := mounter.IsLikelyNotMountPoint(p)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
@ -348,10 +347,8 @@ func ReadMountInfoForProc(proc string) ([]mount.MountInfo, error) {
|
||||
}
|
||||
|
||||
// Mount mounts the source to target path.
|
||||
func Mount(source, target, fstype string, options []string) error {
|
||||
dummyMount := mount.New("")
|
||||
|
||||
return dummyMount.MountSensitiveWithoutSystemd(source, target, fstype, options, nil)
|
||||
func Mount(mounter mount.Interface, source, target, fstype string, options []string) error {
|
||||
return mounter.MountSensitiveWithoutSystemd(source, target, fstype, options, nil)
|
||||
}
|
||||
|
||||
// MountOptionsAdd adds the `add` mount options to the `options` and returns a
|
||||
|
Loading…
Reference in New Issue
Block a user