rbd: export NodeServer.mounter outside of the rbd package

NodeServer.mounter is internal to the NodeServer type, but it needs to
be initialized by the rbd-driver. The rbd-driver is moved to its own
package, so .Mounter needs to be available from there in order to set
it.

Signed-off-by: Niels de Vos <ndevos@redhat.com>
This commit is contained in:
Niels de Vos 2021-12-09 09:04:48 +01:00 committed by mergify[bot]
parent 8d09134125
commit 5baf9811f9

View File

@@ -42,7 +42,7 @@ import (
// node server spec. // node server spec.
type NodeServer struct { type NodeServer struct {
*csicommon.DefaultNodeServer *csicommon.DefaultNodeServer
mounter mount.Interface Mounter mount.Interface
// A map storing all volumes with ongoing operations so that additional operations // A map storing all volumes with ongoing operations so that additional operations
// for that same volume (as defined by VolumeID) return an Aborted error // for that same volume (as defined by VolumeID) return an Aborted error
VolumeLocks *util.VolumeLocks VolumeLocks *util.VolumeLocks
@@ -297,7 +297,7 @@ func (ns *NodeServer) NodeStageVolume(
if !isHealer { if !isHealer {
var isNotMnt bool var isNotMnt bool
// check if stagingPath is already mounted // check if stagingPath is already mounted
isNotMnt, err = isNotMountPoint(ns.mounter, stagingTargetPath) isNotMnt, err = isNotMountPoint(ns.Mounter, stagingTargetPath)
if err != nil { if err != nil {
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} else if !isNotMnt { } else if !isNotMnt {
@@ -507,7 +507,7 @@ func (ns *NodeServer) undoStagingTransaction(
stagingTargetPath := getStagingTargetPath(req) stagingTargetPath := getStagingTargetPath(req)
if transaction.isMounted { if transaction.isMounted {
err = ns.mounter.Unmount(stagingTargetPath) err = ns.Mounter.Unmount(stagingTargetPath)
if err != nil { if err != nil {
log.ErrorLog(ctx, "failed to unmount stagingtargetPath: %s with error: %v", stagingTargetPath, err) log.ErrorLog(ctx, "failed to unmount stagingtargetPath: %s with error: %v", stagingTargetPath, err)
@@ -626,7 +626,7 @@ func (ns *NodeServer) mountVolumeToStagePath(
stagingPath, devicePath string) (bool, error) { stagingPath, devicePath string) (bool, error) {
readOnly := false readOnly := false
fsType := req.GetVolumeCapability().GetMount().GetFsType() fsType := req.GetVolumeCapability().GetMount().GetFsType()
diskMounter := &mount.SafeFormatAndMount{Interface: ns.mounter, Exec: utilexec.New()} diskMounter := &mount.SafeFormatAndMount{Interface: ns.Mounter, Exec: utilexec.New()}
// rbd images are thin-provisioned and return zeros for unwritten areas. A freshly created // rbd images are thin-provisioned and return zeros for unwritten areas. A freshly created
// image will not benefit from discard and we also want to avoid as much unnecessary zeroing // image will not benefit from discard and we also want to avoid as much unnecessary zeroing
// as possible. Open-code mkfs here because FormatAndMount() doesn't accept custom mkfs // as possible. Open-code mkfs here because FormatAndMount() doesn't accept custom mkfs
@@ -730,7 +730,7 @@ func (ns *NodeServer) mountVolume(ctx context.Context, stagingPath string, req *
func (ns *NodeServer) createTargetMountPath(ctx context.Context, mountPath string, isBlock bool) (bool, error) { func (ns *NodeServer) createTargetMountPath(ctx context.Context, mountPath string, isBlock bool) (bool, error) {
// Check if that mount path exists properly // Check if that mount path exists properly
notMnt, err := mount.IsNotMountPoint(ns.mounter, mountPath) notMnt, err := mount.IsNotMountPoint(ns.Mounter, mountPath)
if err == nil { if err == nil {
return notMnt, nil return notMnt, nil
} }
@@ -773,7 +773,7 @@ func (ns *NodeServer) NodeUnpublishVolume(
targetPath := req.GetTargetPath() targetPath := req.GetTargetPath()
// considering kubelet make sure node operations like unpublish/unstage...etc can not be called // considering kubelet make sure node operations like unpublish/unstage...etc can not be called
// at same time, an explicit locking at time of nodeunpublish is not required. // at same time, an explicit locking at time of nodeunpublish is not required.
notMnt, err := mount.IsNotMountPoint(ns.mounter, targetPath) notMnt, err := mount.IsNotMountPoint(ns.Mounter, targetPath)
if err != nil { if err != nil {
if os.IsNotExist(err) { if os.IsNotExist(err) {
// targetPath has already been deleted // targetPath has already been deleted
@@ -792,7 +792,7 @@ func (ns *NodeServer) NodeUnpublishVolume(
return &csi.NodeUnpublishVolumeResponse{}, nil return &csi.NodeUnpublishVolumeResponse{}, nil
} }
if err = ns.mounter.Unmount(targetPath); err != nil { if err = ns.Mounter.Unmount(targetPath); err != nil {
return nil, status.Error(codes.Internal, err.Error()) return nil, status.Error(codes.Internal, err.Error())
} }
@@ -839,7 +839,7 @@ func (ns *NodeServer) NodeUnstageVolume(
stagingParentPath := req.GetStagingTargetPath() stagingParentPath := req.GetStagingTargetPath()
stagingTargetPath := getStagingTargetPath(req) stagingTargetPath := getStagingTargetPath(req)
notMnt, err := mount.IsNotMountPoint(ns.mounter, stagingTargetPath) notMnt, err := mount.IsNotMountPoint(ns.Mounter, stagingTargetPath)
if err != nil { if err != nil {
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
return nil, status.Error(codes.NotFound, err.Error()) return nil, status.Error(codes.NotFound, err.Error())
@@ -849,7 +849,7 @@ func (ns *NodeServer) NodeUnstageVolume(
} }
if !notMnt { if !notMnt {
// Unmounting the image // Unmounting the image
err = ns.mounter.Unmount(stagingTargetPath) err = ns.Mounter.Unmount(stagingTargetPath)
if err != nil { if err != nil {
log.ExtendedLog(ctx, "failed to unmount targetPath: %s with error: %v", stagingTargetPath, err) log.ExtendedLog(ctx, "failed to unmount targetPath: %s with error: %v", stagingTargetPath, err)
@@ -1061,7 +1061,7 @@ func (ns *NodeServer) processEncryptedDevice(
// make sure we continue with the encrypting of the device // make sure we continue with the encrypting of the device
fallthrough fallthrough
case encrypted == rbdImageEncryptionPrepared: case encrypted == rbdImageEncryptionPrepared:
diskMounter := &mount.SafeFormatAndMount{Interface: ns.mounter, Exec: utilexec.New()} diskMounter := &mount.SafeFormatAndMount{Interface: ns.Mounter, Exec: utilexec.New()}
// TODO: update this when adding support for static (pre-provisioned) PVs // TODO: update this when adding support for static (pre-provisioned) PVs
var existingFormat string var existingFormat string
existingFormat, err = diskMounter.GetDiskFormat(devicePath) existingFormat, err = diskMounter.GetDiskFormat(devicePath)
@@ -1109,7 +1109,7 @@ func (ns *NodeServer) xfsSupportsReflink() bool {
// run mkfs.xfs in the same namespace as formatting would be done in // run mkfs.xfs in the same namespace as formatting would be done in
// mountVolumeToStagePath() // mountVolumeToStagePath()
diskMounter := &mount.SafeFormatAndMount{Interface: ns.mounter, Exec: utilexec.New()} diskMounter := &mount.SafeFormatAndMount{Interface: ns.Mounter, Exec: utilexec.New()}
out, err := diskMounter.Exec.Command("mkfs.xfs").CombinedOutput() out, err := diskMounter.Exec.Command("mkfs.xfs").CombinedOutput()
if err != nil { if err != nil {
// mkfs.xfs should fail with an error message (and help text) // mkfs.xfs should fail with an error message (and help text)