cephfs: Support mount option on nodeplugin

Add support for driver-wide kernel and ceph-fuse mount options on the nodeplugin side, so they are applied to every volume the node server mounts.

Signed-off-by: takeaki-matsumoto <takeaki.matsumoto@linecorp.com>
Author:    takeaki-matsumoto
Date:      2022-07-07 00:46:12 +09:00
Committer: mergify[bot]
Commit:    1025871021 (parent ceb88d6498)

8 changed files with 51 additions and 8 deletions
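The node-side options enter through util.Config (see the Config struct hunk at the end of this diff); the command-line wiring that fills those fields is among the 8 changed files but is not shown in this view. A rough, hypothetical sketch of that wiring, assuming the standard flag package and made-up flag names (only the Config fields themselves appear in this diff):

// Hypothetical sketch only: the flag names and descriptions below are
// assumptions; this diff only shows the Config fields they would populate.
// This would have to live inside the ceph-csi module to import internal/util.
package main

import (
    "flag"

    "github.com/ceph/ceph-csi/internal/util"
)

func main() {
    conf := util.Config{}
    flag.StringVar(&conf.KernelMountOptions, "kernelmountoptions", "",
        "comma-separated mount options for the cephfs kernel mounter")
    flag.StringVar(&conf.FuseMountOptions, "fusemountoptions", "",
        "comma-separated mount options for ceph-fuse")
    flag.Parse()
    // conf is then handed to the cephfs driver's Run(), which builds the
    // node server with these driver-wide options (see the Run() hunks below).
}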


@@ -64,10 +64,18 @@ func NewControllerServer(d *csicommon.CSIDriver) *ControllerServer {
 }
 
 // NewNodeServer initialize a node server for ceph CSI driver.
-func NewNodeServer(d *csicommon.CSIDriver, t string, topology map[string]string) *NodeServer {
+func NewNodeServer(
+    d *csicommon.CSIDriver,
+    t string,
+    topology map[string]string,
+    kernelMountOptions string,
+    fuseMountOptions string,
+) *NodeServer {
     return &NodeServer{
-        DefaultNodeServer: csicommon.NewDefaultNodeServer(d, t, topology),
-        VolumeLocks:       util.NewVolumeLocks(),
+        DefaultNodeServer:  csicommon.NewDefaultNodeServer(d, t, topology),
+        VolumeLocks:        util.NewVolumeLocks(),
+        kernelMountOptions: kernelMountOptions,
+        fuseMountOptions:   fuseMountOptions,
     }
 }
@@ -122,7 +130,7 @@ func (fs *Driver) Run(conf *util.Config) {
         if err != nil {
             log.FatalLogMsg(err.Error())
         }
-        fs.ns = NewNodeServer(fs.cd, conf.Vtype, topology)
+        fs.ns = NewNodeServer(fs.cd, conf.Vtype, topology, conf.KernelMountOptions, conf.FuseMountOptions)
     }
 
     if conf.IsControllerServer {
@@ -133,7 +141,7 @@ func (fs *Driver) Run(conf *util.Config) {
         if err != nil {
             log.FatalLogMsg(err.Error())
         }
-        fs.ns = NewNodeServer(fs.cd, conf.Vtype, topology)
+        fs.ns = NewNodeServer(fs.cd, conf.Vtype, topology, conf.KernelMountOptions, conf.FuseMountOptions)
         fs.cs = NewControllerServer(fs.cd)
     }
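Both Run() call sites above thread the driver-wide options straight from util.Config into the node server. For illustration, a direct call to the new constructor might look like the sketch below; the option values are examples only (recover_session=clean is a cephfs kernel mount option, allow_other a FUSE one), not something this commit sets:

// Hypothetical usage of the new NewNodeServer signature (example values).
func newNodeServerExample(d *csicommon.CSIDriver, topology map[string]string) *NodeServer {
    return NewNodeServer(
        d,                        // CSI driver instance
        "cephfs",                 // volume type, conf.Vtype in Run()
        topology,                 // node topology labels
        "recover_session=clean",  // driver-wide kernel mount options
        "allow_other",            // driver-wide ceph-fuse mount options
    )
}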


@@ -43,7 +43,9 @@ type NodeServer struct {
     *csicommon.DefaultNodeServer
     // A map storing all volumes with ongoing operations so that additional operations
     // for that same volume (as defined by VolumeID) return an Aborted error
-    VolumeLocks *util.VolumeLocks
+    VolumeLocks        *util.VolumeLocks
+    kernelMountOptions string
+    fuseMountOptions   string
 }
 
 func getCredentialsForVolume(
@@ -225,7 +227,7 @@ func (ns *NodeServer) NodeStageVolume(
     return &csi.NodeStageVolumeResponse{}, nil
 }
 
-func (*NodeServer) mount(
+func (ns *NodeServer) mount(
     ctx context.Context,
     mnt mounter.VolumeMounter,
     volOptions *store.VolumeOptions,
@@ -244,6 +246,13 @@ func (*NodeServer) mount(
     log.DebugLog(ctx, "cephfs: mounting volume %s with %s", volID, mnt.Name())
 
+    switch mnt.(type) {
+    case *mounter.FuseMounter:
+        volOptions.FuseMountOptions = util.MountOptionsAdd(volOptions.FuseMountOptions, ns.fuseMountOptions)
+    case *mounter.KernelMounter:
+        volOptions.KernelMountOptions = util.MountOptionsAdd(volOptions.KernelMountOptions, ns.kernelMountOptions)
+    }
+
     const readOnly = "ro"
 
     if volCap.AccessMode.Mode == csi.VolumeCapability_AccessMode_MULTI_NODE_READER_ONLY ||
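The merge in mount() goes through util.MountOptionsAdd, whose implementation is not part of this excerpt. The sketch below shows the behavior the call sites appear to rely on, appending comma-separated options while skipping empty entries and duplicates; it is an assumption for illustration, not the upstream helper:

// Sketch of a MountOptionsAdd-style helper (assumed behavior, not the
// upstream implementation): merge comma-separated option strings, dropping
// empty entries and duplicates.
// Example: mountOptionsAdd("noatime", "ro,noatime") == "noatime,ro"
package util

import "strings"

func mountOptionsAdd(options string, add ...string) string {
    candidates := strings.Split(options, ",")
    for _, a := range add {
        candidates = append(candidates, strings.Split(a, ",")...)
    }
    seen := make(map[string]bool)
    merged := make([]string, 0, len(candidates))
    for _, opt := range candidates {
        opt = strings.TrimSpace(opt)
        if opt == "" || seen[opt] {
            continue
        }
        seen[opt] = true
        merged = append(merged, opt)
    }
    return strings.Join(merged, ",")
}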


@@ -98,7 +98,12 @@ type Config struct {
     MetricsPath     string // path of prometheus endpoint where metrics will be available
     HistogramOption string // Histogram option for grpc metrics, should be comma separated value,
     // ex:= "0.5,2,6" where start=0.5 factor=2, count=6
-    MetricsIP string // TCP port for liveness/ metrics requests
+    MetricsIP string // TCP port for liveness/ metrics requests
+
+    // mount option related flags
+    KernelMountOptions string // Comma separated string of mount options accepted by cephfs kernel mounter
+    FuseMountOptions   string // Comma separated string of mount options accepted by ceph-fuse mounter
+
     PidLimit    int           // PID limit to configure through cgroups")
     MetricsPort int           // TCP port for liveness/grpc metrics requests
     PollTime    time.Duration // time interval in seconds between each poll
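Combined with the mount() change above, the value of these Config fields is appended to whatever per-volume options the volume already carries. A small worked example with made-up values, reusing the mountOptionsAdd sketch above:

// Hypothetical values: per-volume kernel options plus the node-wide
// Config.KernelMountOptions, merged the way NodeServer.mount() does it.
func effectiveKernelOptionsExample() string {
    perVolume := "noatime"                // volOptions.KernelMountOptions
    driverWide := "recover_session=clean" // Config.KernelMountOptions on this node
    return mountOptionsAdd(perVolume, driverWide) // "noatime,recover_session=clean"
}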